diff --git a/.editorconfig b/.editorconfig index 01eaf8ae6..e282a8e97 100644 --- a/.editorconfig +++ b/.editorconfig @@ -17,7 +17,7 @@ indent_style = tab indent_size = 4 [{*.yaml,*.yml,*.md}] -intent_style = space +indent_style = space indent_size = 2 [*.sh] diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 6f64b5a33..d1dc5b195 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -4,22 +4,18 @@ # Order is important: the last matching pattern has the highest precedence # These owners will be the default owners for everything -* @cloudposse/engineering @cloudposse/contributors +* @cloudposse/engineering @cloudposse/admins -# Cloud Posse must review any changes to Makefiles -**/Makefile @cloudposse/engineering -**/Makefile.* @cloudposse/engineering +# Cloud Posse admins must review any changes to Makefiles +**/Makefile @cloudposse/admins +**/Makefile.* @cloudposse/admins -# Cloud Posse must review any changes to GitHub actions -.github/* @cloudposse/engineering +# Cloud Posse admins must review any changes to GitHub actions +.github/workflows/* @cloudposse/admins -# Cloud Posse must review any changes to standard context definition, -# but some changes can be rubber-stamped. -**/*.tf @cloudposse/engineering @cloudposse/contributors @cloudposse/approvers -README.yaml @cloudposse/engineering @cloudposse/contributors @cloudposse/approvers -README.md @cloudposse/engineering @cloudposse/contributors @cloudposse/approvers -docs/*.md @cloudposse/engineering @cloudposse/contributors @cloudposse/approvers -# Cloud Posse Admins must review all changes to CODEOWNERS or the mergify configuration +# Cloud Posse admins must review all changes to CODEOWNERS or the mergify or release configuration +.github/.github-update-disabled @cloudposse/admins +.github/auto-release.yml @cloudposse/admins .github/mergify.yml @cloudposse/admins .github/CODEOWNERS @cloudposse/admins diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index f3df96b5d..1722d9473 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,37 +1,43 @@ --- name: Bug report about: Create a report to help us improve -title: '' -labels: 'bug' -assignees: '' - +title: "" +labels: "bug" +assignees: "" --- -Found a bug? Maybe our [Slack Community](https://slack.cloudposse.com) can help. +Found a bug? Maybe our [Slack Community](https://slack.cloudposse.com) can help. [![Slack Community](https://slack.cloudposse.com/badge.svg)](https://slack.cloudposse.com) ## Describe the Bug + A clear and concise description of what the bug is. ## Expected Behavior + A clear and concise description of what you expected to happen. ## Steps to Reproduce + Steps to reproduce the behavior: + 1. Go to '...' 2. Run '....' 3. Enter '....' 4. See error ## Screenshots + If applicable, add screenshots or logs to help explain your problem. ## Environment (please complete the following information): Anything that will help us triage the bug will help. Here are some ideas: - - OS: [e.g. Linux, OSX, WSL, etc] - - Version [e.g. 10.15] + +- OS: [e.g. Linux, OSX, WSL, etc] +- Version [e.g. 10.15] ## Additional Context -Add any other context about the problem here. \ No newline at end of file + +Add any other context about the problem here. 
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 000000000..f40fe2f56 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,72 @@ +--- +name: Bug report +description: Create a report to help us improve +labels: ["bug"] +assignees: [""] +body: + - type: markdown + attributes: + value: | + Found a bug? + + Please checkout our [Slack Community](https://slack.cloudposse.com) + or visit our [Slack Archive](https://archive.sweetops.com/). + + [![Slack Community](https://slack.cloudposse.com/badge.svg)](https://slack.cloudposse.com) + + - type: textarea + id: concise-description + attributes: + label: Describe the Bug + description: A clear and concise description of what the bug is. + placeholder: What is the bug about? + validations: + required: true + + - type: textarea + id: expected + attributes: + label: Expected Behavior + description: A clear and concise description of what you expected. + placeholder: What happened? + validations: + required: true + + - type: textarea + id: reproduction-steps + attributes: + label: Steps to Reproduce + description: Steps to reproduce the behavior. + placeholder: How do we reproduce it? + validations: + required: true + + - type: textarea + id: screenshots + attributes: + label: Screenshots + description: If applicable, add screenshots or logs to help explain. + validations: + required: false + + - type: textarea + id: environment + attributes: + label: Environment + description: Anything that will help us triage the bug. + placeholder: | + - OS: [e.g. Linux, OSX, WSL, etc] + - Version [e.g. 10.15] + - Module version + - Terraform version + validations: + required: false + + - type: textarea + id: additional + attributes: + label: Additional Context + description: | + Add any other context about the problem here. + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 76ae6d67a..918f371c1 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -15,4 +15,4 @@ contact_links: - name: DevOps Accelerator Program url: https://cloudposse.com/accelerate/ about: |- - Own your infrastructure in record time. We build it. You drive it. + Own your infrastructure in record time. We build it. You drive it. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 39a8686f1..5ec5bfc31 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -1,19 +1,19 @@ --- name: Feature Request about: Suggest an idea for this project -title: '' -labels: 'feature request' -assignees: '' - +title: "" +labels: "feature request" +assignees: "" --- -Have a question? Please checkout our [Slack Community](https://slack.cloudposse.com) or visit our [Slack Archive](https://archive.sweetops.com/). +Have a question? Please checkout our [Slack Community](https://slack.cloudposse.com) or visit our +[Slack Archive](https://archive.sweetops.com/). [![Slack Community](https://slack.cloudposse.com/badge.svg)](https://slack.cloudposse.com) ## Describe the Feature -A clear and concise description of what the bug is. +A clear and concise description of what the bug is. ## Expected Behavior @@ -21,7 +21,8 @@ A clear and concise description of what you expected to happen. ## Use Case -Is your feature request related to a problem/challenge you are trying to solve? 
Please provide some additional context of why this feature or capability will be valuable. +Is your feature request related to a problem/challenge you are trying to solve? Please provide some additional context +of why this feature or capability will be valuable. ## Describe Ideal Solution diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 000000000..44047f02e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,71 @@ +--- +name: Feature Request +description: Suggest an idea for this project +labels: ["feature request"] +assignees: [""] +body: + - type: markdown + attributes: + value: | + Have a question? + + Please checkout our [Slack Community](https://slack.cloudposse.com) + or visit our [Slack Archive](https://archive.sweetops.com/). + + [![Slack Community](https://slack.cloudposse.com/badge.svg)](https://slack.cloudposse.com) + + - type: textarea + id: concise-description + attributes: + label: Describe the Feature + description: A clear and concise description of what the feature is. + placeholder: What is the feature about? + validations: + required: true + + - type: textarea + id: expected + attributes: + label: Expected Behavior + description: A clear and concise description of what you expected. + placeholder: What happened? + validations: + required: true + + - type: textarea + id: use-case + attributes: + label: Use Case + description: | + Is your feature request related to a problem/challenge you are trying + to solve? + + Please provide some additional context of why this feature or + capability will be valuable. + validations: + required: true + + - type: textarea + id: ideal-solution + attributes: + label: Describe Ideal Solution + description: A clear and concise description of what you want to happen. + validations: + required: true + + - type: textarea + id: alternatives-considered + attributes: + label: Alternatives Considered + description: Explain alternative solutions or features considered. + validations: + required: false + + - type: textarea + id: additional + attributes: + label: Additional Context + description: | + Add any other context about the problem here. + validations: + required: false diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 4b8f32df3..f5fb7d435 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,13 +1,21 @@ ## what -* Describe high-level what changed as a result of these commits (i.e. in plain-english, what do these changes mean?) -* Use bullet points to be concise and to the point. + + ## why -* Provide the justifications for the changes (e.g. business case). -* Describe why these changes were made (e.g. why do these commits fix the problem?) -* Use bullet points to be concise and to the point. + + ## references -* Link to any supporting github issues or helpful documentation to add some context (e.g. stackoverflow). -* Use `closes #123`, if this PR closes a GitHub issue `#123` + diff --git a/.github/auto-release.yml b/.github/auto-release.yml index 17cd39c82..c9336dbc6 100644 --- a/.github/auto-release.yml +++ b/.github/auto-release.yml @@ -37,8 +37,7 @@ categories: change-template: |
$TITLE @$AUTHOR (#$NUMBER) - - $BODY + $BODY
template: | diff --git a/.github/banner.png b/.github/banner.png new file mode 100644 index 000000000..6d94327be Binary files /dev/null and b/.github/banner.png differ diff --git a/.github/settings.yml b/.github/settings.yml new file mode 100644 index 000000000..941b10f30 --- /dev/null +++ b/.github/settings.yml @@ -0,0 +1,14 @@ +# These settings are synced to GitHub by https://probot.github.io/apps/settings/ +# Upstream changes from _extends are only recognized when modifications are made to this file in the default branch. +_extends: .github +repository: + # A URL with more information about the repository + homepage: https://docs.cloudposse.com/components/ + # Either `true` to enable projects for this repository, or `false` to disable them. + # If projects are disabled for the organization, passing `true` will cause an API error. + has_projects: false + # Either `true` to enable the wiki for this repository, `false` to disable it. + has_wiki: false + name: terraform-aws-components + description: Opinionated, self-contained Terraform root modules that each solve one, specific problem + topics: terraform, terraform-module, geodesic, reference-implementation, reference-architecture, aws, service-catalog, catalog, library, examples, terraform-modules, stacks, blueprints, itil, catalogue, components, component-library diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml index 3a38fae08..0929676d2 100644 --- a/.github/workflows/auto-release.yml +++ b/.github/workflows/auto-release.yml @@ -8,19 +8,8 @@ on: - production jobs: - publish: - runs-on: ubuntu-latest - steps: - # Get PR from merged commit to master - - uses: actions-ecosystem/action-get-merged-pull-request@v1 - id: get-merged-pull-request - with: - github_token: ${{ secrets.PUBLIC_REPO_ACCESS_TOKEN }} - # Drafts your next Release notes as Pull Requests are merged into "main" - - uses: release-drafter/release-drafter@v5 - with: - publish: ${{ !contains(steps.get-merged-pull-request.outputs.labels, 'no-release') }} - prerelease: false - config-name: auto-release.yml - env: - GITHUB_TOKEN: ${{ secrets.PUBLIC_REPO_ACCESS_TOKEN }} + auto: + uses: cloudposse/.github/.github/workflows/shared-auto-release.yml@main + with: + publish: true + secrets: inherit diff --git a/.github/workflows/bats.yml b/.github/workflows/bats.yml index 6aa05718c..798006459 100644 --- a/.github/workflows/bats.yml +++ b/.github/workflows/bats.yml @@ -1,7 +1,7 @@ name: bats on: - pull_request_target: + pull_request: types: [labeled, opened, synchronize, unlabeled] jobs: @@ -16,13 +16,9 @@ jobs: BATS_SUBMODULE_TESTS: input-descriptions lint output-descriptions steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - repository: ${{ github.event.pull_request.head.repo.full_name }} - # Check out the PR commit, not the merge commit - # Use `ref` instead of `sha` to enable pushing back to `ref` - ref: ${{ github.event.pull_request.head.ref }} - name: Run tests on modified modules id: get-modified-files @@ -32,8 +28,8 @@ jobs: HEAD_REF: ${{ github.head_ref }} run: | # when running in test-harness, need to mark the directory safe for git operations - make safe-directory - MODIFIED_MODULES=($(git diff --name-only origin/${BASE_REF} origin/${HEAD_REF} | xargs -n 1 dirname | sort | uniq | grep ^modules/)) + make git-safe-directory + MODIFIED_MODULES=($(git diff --name-only origin/${BASE_REF} origin/${HEAD_REF} | xargs -n 1 dirname | sort | uniq | grep ^modules/ || true)) if [ -z "$MODIFIED_MODULES" 
]; then echo "No modules changed in this PR. Skipping tests." exit 0 diff --git a/.github/workflows/pre-commit-check-and-autocommit-changes.yaml b/.github/workflows/pre-commit-check-and-autocommit-changes.yaml deleted file mode 100644 index 63b2f1b36..000000000 --- a/.github/workflows/pre-commit-check-and-autocommit-changes.yaml +++ /dev/null @@ -1,100 +0,0 @@ -name: pre-commit-check-and-autocommit-changes - -on: - pull_request_target: - types: [labeled, opened, synchronize, unlabeled] - -jobs: - run-pre-commit-checks-and-autocommit-changes: - runs-on: ubuntu-latest - if: github.event.pull_request.state == 'open' - steps: - - name: Privileged Checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - token: ${{ secrets.PUBLIC_REPO_ACCESS_TOKEN }} - repository: ${{ github.event.pull_request.head.repo.full_name }} - # Check out the PR commit, not the merge commit - # Use `ref` instead of `sha` to enable pushing back to `ref` - ref: ${{ github.event.pull_request.head.ref }} - - - name: Get List of Modified files - id: get-modified-files - shell: bash -x -e -o pipefail {0} - env: - BASE_REF: ${{ github.base_ref }} - HEAD_REF: ${{ github.head_ref }} - run: | - MODIFIED_FILES=$(git diff --name-only origin/${BASE_REF} origin/${HEAD_REF}) - if [ -z "$MODIFIED_FILES" ]; then - echo "No changed files detected on this branch? This must be an error." - exit 1 - else - echo "Running checks on the following files: ${MODIFIED_FILES}" - echo "::set-output name=modified_files::$(echo $MODIFIED_FILES)" - fi - - - name: Get Terraform Version - id: get-terraform-version - shell: bash -x -e -o pipefail {0} - env: - BASE_REF: ${{ github.base_ref }} - LABELS: ${{ join(github.event.pull_request.labels.*.name, '\n') }} - MODIFIED_FILES: ${{ steps.get-modified-files.outputs.modified_files }} - DEFAULT_TERRAFORM_VERSION: ${{ secrets.DEFAULT_TERRAFORM_VERSION }} - run: | - # Match labels like `terraform/0.12` or nothing (to prevent grep from returning a non-zero exit code) - # Use [0-9] because \d is not standard part of egrep - echo "PR labels: ${LABELS}" - TERRAFORM_VERSION=$(grep -Eo 'terraform/[0-9]+\.[x0-9]+|' <<<${LABELS} | cut -d/ -f2) - # Go through all the possible cases: one compliant label, no compliant labels, and multiple compliant labels - if grep -Ez '^\W*[0-9]+\.[x0-9]+\W*$' <<<${TERRAFORM_VERSION}; then - echo "Terraform version ${TERRAFORM_VERSION} will be used to check the formatting of files that have been modified since this branch diverged from ${BASE_REF}." - elif [ -z "${TERRAFORM_VERSION}" ]; then - TERRAFORM_VERSION="${DEFAULT_TERRAFORM_VERSION:-1.x}" - echo "No Terraform version found in the PR labels. Using the default Terraform version ${TERRAFORM_VERSION}." - else - echo "You have either chosen terraform labels with malformed versions or, more likely, you have chosen multiple terraform version labels." 
- echo "Please select a single terraform version label that matches the following regular expression: terraform/[0-9]+\.[x0-9]+" - exit 2 - fi - # Construct the actual semver expression that will be passed to Terraform - if grep -q 'x$' <<<${TERRAFORM_VERSION}; then - TERRAFORM_SEMVER=$TERRAFORM_VERSION - else - TERRAFORM_SEMVER=~$TERRAFORM_VERSION - fi - # Create GitHub Actions step output - echo "::set-output name=terraform_semver::$(echo $TERRAFORM_SEMVER)" - - # Install terraform to ensure we're using our expected version - - uses: hashicorp/setup-terraform@v1 - with: - terraform_version: ${{ steps.get-terraform-version.outputs.terraform_semver }} - - # Install terraform-docs for pre-commit hook - - name: Install terraform-docs - shell: bash - env: - INSTALL_PATH: "${{ github.workspace }}/bin" - run: | - make init - mkdir -p "${INSTALL_PATH}" - make packages/install/terraform-docs - echo "$INSTALL_PATH" >> $GITHUB_PATH - - # python setup, in preparation for pre-commit run - - uses: actions/setup-python@v2 - - # pre-commit checks: fmt + terraform-docs - # We skip tf_validate as it requires an init - # of all root modules, which is to be avoided. - - uses: cloudposse/github-action-pre-commit@v2.1.2 - env: - SKIP: tf_validate - with: - token: ${{ secrets.PUBLIC_REPO_ACCESS_TOKEN }} - git_user_name: cloudpossebot - git_user_email: cloudpossebot@users.noreply.github.com - extra_args: --files ${{ steps.get-modified-files.outputs.modified_files }} diff --git a/.github/workflows/scheduled.yml b/.github/workflows/scheduled.yml new file mode 100644 index 000000000..7bc09ab9d --- /dev/null +++ b/.github/workflows/scheduled.yml @@ -0,0 +1,16 @@ +--- +name: scheduled +on: + workflow_dispatch: { } # Allows manually trigger this workflow + schedule: + - cron: "0 3 * * *" + +permissions: + pull-requests: write + id-token: write + contents: write + +jobs: + scheduled: + uses: cloudposse/github-actions-workflows-terraform-module/.github/workflows/scheduled.yml@main + secrets: inherit diff --git a/.github/workflows/update-changelog.yml b/.github/workflows/update-changelog.yml new file mode 100644 index 000000000..f11954fe9 --- /dev/null +++ b/.github/workflows/update-changelog.yml @@ -0,0 +1,174 @@ +name: "Update Changelog" + +on: + release: + types: + - published + +permissions: + id-token: write + contents: write + pull-requests: write + +jobs: + update-changelog: + runs-on: ["ubuntu-latest"] + steps: + - name: Current Release + id: current-release + uses: actions/github-script@v6 + with: + script: | + const event = ${{ toJSON(github.event) }}; + + const tag = event.release.tag_name; + const body = event.release.body; + + console.log(`Current release tag: ${tag}`); + console.log(`Current release body: ${body}`); + + core.setOutput('tag', tag); + core.setOutput('body', Buffer.from(body).toString('base64')); + + - name: Previous Release + id: previous-release + uses: actions/github-script@v6 + with: + script: | + const releases = await github.rest.repos.listReleases({ + ...context.repo + }); + + const currentReleaseIndex = releases.data.findIndex(release => "${{ steps.current-release.outputs.tag }}" === context.payload.release.tag_name); + + const previousRelease = releases.data[currentReleaseIndex + 1]; + const tag = previousRelease.tag_name; + + console.log(`Previous release tag: ${tag}`); + + core.setOutput('tag', tag); + + - name: Checkout Current Release + uses: actions/checkout@v3 + with: + ref: "${{ steps.current-release.outputs.tag }}" + path: current + + - name: Checkout Previous Release + uses: 
actions/checkout@v3 + with: + ref: "${{ steps.previous-release.outputs.tag }}" + path: previous + + - name: Find Updated CHANGELOG.md files + id: updated + uses: actions/github-script@v6 + with: + script: | + const path = require('path'); + const fs = require('fs'); + const crypto = require('crypto'); + + function findChangelogs(dir, fileList = []) { + const files = fs.readdirSync(dir); + + files.forEach(file => { + if (fs.statSync(path.join(dir, file)).isDirectory()) { + fileList = findChangelogs(path.join(dir, file), fileList); + } else if (file === 'CHANGELOG.md') { + fileList.push(path.join(dir, file)); + } + }); + + return fileList; + } + + function calculateHash(file) { + const hash = crypto.createHash('md5'); + const fileContent = fs.readFileSync(file); + + hash.update(fileContent); + + return hash.digest('hex'); + } + + function trimPath(relativePath) { + return relativePath + .replace('modules/', '') + .replace('/CHANGELOG.md', ''); + } + + const currentChangeLogFiles = findChangelogs('./current/modules'); + const components = []; + + for (let i = 0; i < currentChangeLogFiles.length; i++) { + const currentReleaseFile = currentChangeLogFiles[i]; + const relativePath = currentReleaseFile.replace(/^current\//, ''); + const previousReleaseFile = `previous/${relativePath}` + + if (!fs.existsSync(previousReleaseFile)) { + console.log(`New CHANGELOG.md found: ${relativePath}`); + components.push(trimPath(relativePath)); + continue; + } + + if (calculateHash(currentReleaseFile) !== calculateHash(previousReleaseFile)) { + console.log(`CHANGELOG.md changed: ${relativePath}`); + components.push(trimPath(relativePath)); + } else { + console.log(`${relativePath} didn't change. Skipping ...`); + } + } + + core.setOutput('components', JSON.stringify(components)); + + - name: Checkout + uses: actions/checkout@v3 + + - name: Generate Changelog + uses: actions/github-script@v6 + with: + script: | + const fs = require('fs'); + + const tag = "${{ steps.current-release.outputs.tag }}"; + let body = Buffer.from("${{ steps.current-release.outputs.body }}", 'base64').toString('utf-8'); + const updatedComponents = JSON.parse(`${{ steps.updated.outputs.components }}`); + + let affectedComponents = ""; + if (updatedComponents.length > 0) { + affectedComponents += "\n\n"; + affectedComponents += "## Affected Components\n"; + + for (let i = 0; i < updatedComponents.length; i++) { + const relativePath = updatedComponents[i]; + affectedComponents += `- [${relativePath}](https://docs.cloudposse.com/components/library/aws/${relativePath}#changelog)\n` + } + + affectedComponents += "\n\n"; + } + + const content = `\n## ${tag}\n\n${affectedComponents}\n\n${body}\n` + + console.log(content); + + const filePath = 'CHANGELOG.md' + const fileContent = fs.readFileSync(filePath, 'utf-8'); + const lines = fileContent.split('\n'); + + lines.splice(1, 0, content); + + const updatedContent = lines.join('\n'); + fs.writeFileSync(filePath, updatedContent, 'utf-8'); + + - name: Create Pull Request + uses: peter-evans/create-pull-request@v6 + with: + title: 'Update Changelog for `${{ steps.current-release.outputs.tag }}`' + body: 'Update Changelog for [`${{ steps.current-release.outputs.tag }}`](${{ github.event.release.html_url }})' + base: main + branch: "changelog/${{ steps.current-release.outputs.tag }}" + delete-branch: "true" + commit-message: "Update Changelog for ${{ steps.current-release.outputs.tag }}" + labels: | + no-release diff --git a/.gitignore b/.gitignore index 23c3bd466..0b987f3b3 100644 --- a/.gitignore +++ 
b/.gitignore @@ -3,6 +3,7 @@ build-harness/ aws-assumed-role/ .idea/ *.iml +docs/terraform.md vendir.lock.yml @@ -189,3 +190,7 @@ dmypy.json # Cython debug symbols cython_debug/ + +*.backup + +default.auto.tfvars diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2affe7454..1aaef70c4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,21 +1,61 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v2.3.0 + rev: v4.4.0 hooks: - - id: check-yaml + # Git style + - id: check-added-large-files # prevents giant files from being committed. + - id: forbid-new-submodules # prevents addition of new git submodules. + - id: no-commit-to-branch # don't commit to branch + + # Common errors + - id: trailing-whitespace # trims trailing whitespace. + args: [--markdown-linebreak-ext=md] + - id: end-of-file-fixer # ensures that a file is either empty, or ends with one newline. + - id: check-merge-conflict # checks for files that contain merge conflict strings. + - id: check-executables-have-shebangs # ensures that (non-binary) executables have a shebang. + + # Cross platform + - id: check-case-conflict # checks for files that would conflict in case-insensitive filesystems. + - id: mixed-line-ending # replaces or checks mixed line ending. + args: [--fix=lf] + + # YAML + - id: check-yaml # checks yaml files for parseable syntax. exclude: | (?x)^( + deprecated/eks/.*/charts/.*/templates/.*.yaml deprecated/github-actions-runner/runners/actions-runner/chart/templates/.*.yaml | modules/eks/cert-manager/cert-manager-issuer/templates/.*.yaml | modules/strongdm/charts/strongdm/templates/.*.yaml | - modules/eks/.*/charts/.*/templates/.*.yaml + modules/eks/.*/charts/.*/templates/.*.yaml | + modules/eks/.*/charts/.*/templates/.*.yml | + modules/eks/promtail/scrape_config/.*.yaml | + modules/eks/promtail/scrape_config/.*.yml )$ + - repo: https://github.com/antonbabenko/pre-commit-terraform - rev: v1.75.0 + rev: v1.80.0 hooks: - id: terraform_fmt - id: terraform_docs args: ["--args=--lockfile=false"] + + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v3.1.0 + hooks: + - id: prettier + name: prettier + entry: prettier --write --prose-wrap always --print-width 120 + types: ["markdown"] + # If prettier chokes on the output of the `terraform-docs` command for a file, don't + # exclude it here. Instead, wrap the terraform-docs comment in the README with + # `` and `` comments. + exclude: | + (?x)^( + README.md | + deprecated/.*.md + )$ + - repo: local hooks: - id: rebuild-mixins-docs @@ -25,4 +65,3 @@ repos: types: ["text"] files: (mixins\/.*|bin\/rebuild-mixins-docs\.sh) pass_filenames: false - diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..636aff9f2 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,5386 @@ +# CHANGELOG + +## 1.505.0 + + + +
+ fix: account-quota drift reduced @dudymas (#1102) +## what + +- encode values into a `for_each` on service quota resources + +## why + +- terraform sometimes gets bad state back from the AWS API, so fetched results +ought to be ignored. Instead, input values should be respected as truth. + +## references + +- AWS CLI + [command to list service quotas](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/service-quotas/list-service-quotas.html) `aws service-quotas list-service-quotas`. + Note where it says "For some quotas, only the default values are available." +- [Medium article](https://medium.com/@jsonk/the-limit-does-not-exist-hidden-visibility-of-aws-service-limits-4b786f846bc0) + explaining how many AWS service limits are not available. + + +
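For illustration, a minimal Terraform sketch of the pattern (the variable name and shape are assumptions, not the component's actual interface): keying the quota resources on the configured inputs keeps the requested values as the source of truth even when the AWS API reports stale or default values.

```hcl
# Illustrative only: the real component wires this through its own variables.
variable "service_quotas" {
  type = map(object({
    service_code = string
    quota_code   = string
    value        = number
  }))
  default = {}
}

resource "aws_servicequotas_service_quota" "this" {
  # for_each over the inputs, so fetched quota values never feed back into the plan
  for_each = var.service_quotas

  service_code = each.value.service_code
  quota_code   = each.value.quota_code
  value        = each.value.value
}
```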
+ +
+ Update Changelog for `1.504.0` @github-actions (#1128) +Update Changelog for [`1.504.0`](https://github.com/cloudposse/terraform-aws-components/releases/tag/1.504.0) +
+ + + +## 1.504.0 + + + +
+ feat: allow vulnerability scanning of Argo repository and implement ignore changes for non-change drift @RoseSecurity (#1120) +## what + +- Attempted to refactor code to ensure changes don't occur on each run (did not resolve) +- Opened an issue with [GitHub](https://github.com/integrations/terraform-provider-github/issues/2243) but is still in the triaging state +- This is a quick fix for addressing the following non-change + +```console +Terraform used the selected providers to generate the following execution +plan. Resource actions are indicated with the following symbols: + ~ update in-place + +Terraform will perform the following actions: + + # github_branch_protection.default[0] will be updated in-place + ~ resource "github_branch_protection" "default" { + id = "XXXXXXX" + # (10 unchanged attributes hidden) + + ~ restrict_pushes { + ~ push_allowances = [ + + "XXXXXXX", + ] +``` + +## why + +- [X] Adds lifecycle meta-argument for ignoring changes to `push_allowances` +- [X] Enable vulnerability alerting for vulnerable dependencies by default to address `tfsec` findings + +## Testing + +- [X] Validated with `atmos validate stacks` +- [X] Performed successful `atmos terraform deploy` on component + +
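For illustration, a sketch of the workaround (all arguments except the `lifecycle` block are placeholders): the spurious `push_allowances` diff is suppressed until the upstream provider issue is resolved.

```hcl
resource "github_branch_protection" "default" {
  # Placeholder arguments; only the lifecycle block illustrates the fix.
  repository_id = "example-repo"
  pattern       = "main"

  restrict_pushes {
    push_allowances = ["example-org/example-team"]
  }

  lifecycle {
    # Ignore the push restriction drift reported on every plan
    # (see the linked terraform-provider-github issue).
    ignore_changes = [restrict_pushes]
  }
}
```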
+ +
+ Update Changelog for `1.502.0` @github-actions (#1126) +Update Changelog for [`1.502.0`](https://github.com/cloudposse/terraform-aws-components/releases/tag/1.502.0) +
+ + + +## 1.502.0 + + + +
+ upstream `tailscale` @Benbentwo (#835) +## what +* Initial Tailscale deployment + +## why +* tailscale operators + +## references +* https://github.com/tailscale/tailscale/tree/main/docs/k8s + +
+ +
+ Update Changelog for `1.501.0` @github-actions (#1125) +Update Changelog for [`1.501.0`](https://github.com/cloudposse/terraform-aws-components/releases/tag/1.501.0) +
+ +
+ + docs: improve external-dns snippet in readme @sgtoj (#986) +## what + +- update the `eks/external-dns` component example in readme + - set latest chart version + - configure the resource properly + - add `txt_prefix` var to snippet + +## why + +- help future engineers deploying or updating external-dns + +## references + +- n/a +
+ +
+ Update Changelog for `1.500.0` @github-actions (#1124) +Update Changelog for [`1.500.0`](https://github.com/cloudposse/terraform-aws-components/releases/tag/1.500.0) +
+ + + +## 1.501.0 + +
+ Fix release changelog space issue @goruha (#1122) +## what +* Fix release changelog space issue + +![CleanShot 2024-10-01 at 12 27 42@2x](https://github.com/user-attachments/assets/2d42740a-1d5d-4990-94ac-eb49bdfe4c32) + +## why +* Have nice changelog + +## references +* https://github.com/cloudposse/terraform-aws-components/pull/1117/files#diff-06572a96a58dc510037d5efa622f9bec8519bc1beab13c9f251e97e657a9d4edR10 + + +## 1.500.0 + + + +## Affected Components +- [eks/argocd](https://docs.cloudposse.com/components/library/aws/eks/argocd#changelog) +- [eks/cluster](https://docs.cloudposse.com/components/library/aws/eks/cluster#changelog) +- [eks/datadog-agent](https://docs.cloudposse.com/components/library/aws/eks/datadog-agent#changelog) +- [eks/github-actions-runner](https://docs.cloudposse.com/components/library/aws/eks/github-actions-runner#changelog) +- [spa-s3-cloudfront](https://docs.cloudposse.com/components/library/aws/spa-s3-cloudfront#changelog) + + +
+ + add additional waf features @mcalhoun (#791) + + ## what +* Add the ability to specify a list of ALBs to attach WAF to +* Add the ability to specify a list of tags to target ALBs to attach WAF to + +## why +* To provide greater flexibility in attaching WAF to ALBs
+ +
+ Update Changelog for `1.499.0` @github-actions (#1123) + + Update Changelog for [`1.499.0`](https://github.com/cloudposse/terraform-aws-components/releases/tag/1.499.0) +
+ +
+ docs: fix typos using `codespell` @RoseSecurity (#1114) + + ## what and why + +> [!NOTE] +> Feel free to close this PR if the changes are not worth the review. I won't be offended + +- For context, I wanted to clean up some of the documentation in our repository, which identified several typos in our variables and READMEs. I decided to use `codespell` to automate this process and thought it might be useful for a quick cleanup here! + +### usage + +```sh +codespell -w +``` + +
+ + + +## 1.499.0 + + + +
+ feat: add detector features to guard duty component @dudymas (#1112) + + ## what + +- add detector features to guard duty + +## why + +- added functionality + +## references + +- [Detector Feature API](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorFeatureConfiguration.html) + +
+ +
+ Update Changelog for `1.497.0` @github-actions (#1117) + + Update Changelog for [`1.497.0`](https://github.com/cloudposse/terraform-aws-components/releases/tag/1.497.0) +
+ + + +## 1.497.0 + + + +
+ + Fix Update changelog workflow @goruha (#1116) + + ## what +* Fix modules path from `components/terraform` to `modules` + +## why +* It seems that `components/terraform` was a testing value. In the actual repo, components are in the `modules` directory + +## references +* DEV-2556 Investigate release issues with terraform-aws-components
+ + + +## 1.298.0 (2023-08-28T20:56:25Z) + +
+ Aurora Postgres Engine Options @milldr (#845) + +### what + +- Add scaling configuration variables for both Serverless and Serverless v2 to `aurora-postgres` +- Update `aurora-postgres` README + +### why + +- Support both serverless options +- Add an explanation for how to configure each, and where to find valid engine options + +### references + +- n/a + +
+ +## 1.297.0 (2023-08-28T18:06:11Z) + +
+ AWS provider V5 dependency updates @max-lobur (#729) + +### what + +- Update component dependencies for the AWS provider V5 + +Requested components: + +- cloudtrail-bucket +- config-bucket +- datadog-logs-archive +- eks/argocd +- eks/efs-controller +- eks/metric-server +- spacelift-worker-pool +- eks/external-secrets-operator + +### why + +- Maintenance + +
+ +## 1.296.0 (2023-08-28T16:24:05Z) + +
+ datadog agent update defaults @Benbentwo (#839) + +### what + +- prevent fargate agents +- use sockets instead of ports for APM +- enable other services + +### why + +- Default Datadog APM enabled over k8s + +### references + +
+ +## 1.295.0 (2023-08-26T00:51:10Z) + +
+ TGW FAQ and Spoke Alternate VPC Support @milldr (#840) + +### what + +- Added FAQ to the TGW upgrade guide for replacing attachments +- Added note about destroying TGW components +- Added option to not create TGW propagation and association when connecting an alternate VPC + +### why + +- When connecting an alternate VPC in the same region as the primary VPC, we do not want to create a duplicate TGW + propagation and association + +### references + +- n/a + +
+ +## 1.294.0 (2023-08-26T00:07:42Z) + +
+ Aurora Upstream: Serverless, Tags, Enabled: False @milldr (#841) + +### what + +- Set `module.context` to `module.cluster` across all resources +- Only set parameter for replica if cluster size is > 0 +- `enabled: false` support + +### why + +- Missing tags for SSM parameters for cluster attributes +- Serverless clusters set `cluster_size: 0`, which will break the SSM parameter for replica hostname (since it does not + exist) +- Support enabled false for `aurora-*-resources` components + +### references + +- n/a + +
+ +## 1.293.2 (2023-08-24T15:50:53Z) + +### πŸš€ Enhancements + +
+ Update `root_stack` output in `modules/spacelift/admin-stack/outputs.tf` @aknysh (#837) + +### what + +- Update `root_stack` output in `modules/spacelift/admin-stack/outputs.tf` + +### why + +- Fix the issue described in https://github.com/cloudposse/terraform-aws-components/issues/771 + +### related + +- Closes https://github.com/cloudposse/terraform-aws-components/issues/771 + +
+ +## 1.293.1 (2023-08-24T11:24:46Z) + +### πŸ› Bug Fixes + +
+ [spacelift/worker-pool] Update providers.tf nesting @Nuru (#834) + +### what + +- Update relative path to `account-map` in `spacelift/worker-pool/providers.tf` + +### why + +- Fixes #828 + +
+ +## 1.293.0 (2023-08-23T01:18:53Z) + +
+ Add visibility to default VPC component name @milldr (#833) + +### what + +- Set the default component name for `vpc` in variables, not remote-state + +### why + +- Bring visibility to where the default is set + +### references + +- Follow up on comments on #832 + +
+ +## 1.292.0 (2023-08-22T21:33:18Z) + +
+ Aurora Optional `vpc` Component Names @milldr (#832) + +### what + +- Allow optional VPC component names in the aurora components + +### why + +- Support deploying the clusters for other VPC components than `"vpc"` + +### references + +- n/a + +
+ +## 1.291.1 (2023-08-22T20:25:17Z) + +### πŸ› Bug Fixes + +
+ [aws-sso] Fix root provider, restore `SetSourceIdentity` permission @Nuru (#830) + +### what + +For `aws-sso`: + +- Fix root provider, improperly restored in #740 +- Restore `SetSourceIdentity` permission inadvertently removed in #740 + +### why + +- When deploying to `identity`, `root` provider did not reference `root` account +- Likely unintentional removal due to merge error + +### references + +- #740 +- #738 + +
+ +## 1.291.0 (2023-08-22T17:08:27Z) + +
+ chore: remove defaults from components @dudymas (#831) + +### what + +- remove `defaults.auto.tfvars` from component modules + +### why + +- in favor of drying up configuration using atmos + +### Notes + +- Some defaults may not be captured yet. Regressions might occur. + +
+ +## 1.290.0 (2023-08-21T18:57:43Z) + +
+ Upgrade aws-config and conformance pack modules to 1.1.0 @johncblandii (#829) + +### what + +- Upgrade aws-config and conformance pack modules to 1.1.0 + +### why + +- They're outdated. + +### references + +- #771 + +
+ +## 1.289.2 (2023-08-21T08:53:08Z) + +### πŸ› Bug Fixes + +
+ [eks/alb-controller] Fix naming convention of overridable local variable @Nuru (#826) + +### what + +- [eks/alb-controller] Change name of local variable from `distributed_iam_policy_overridable` to + `overridable_distributed_iam_policy` + +### why + +- Cloud Posse style guide requires `overridable` as prefix, not suffix. + +
+ +## 1.289.1 (2023-08-19T05:20:26Z) + +### πŸ› Bug Fixes + +
+ [eks/alb-controller] Update ALB controller IAM policy @Nuru (#821) + +### what + +- [eks/alb-controller] Update ALB controller IAM policy + +### why + +- Previous policy had error preventing the creation of the ELB service-linked role + +
+ +## 1.289.0 (2023-08-18T20:18:12Z) + +
+ Spacelift Alternate git Providers @milldr (#825) + +### what + +- set alternate git provider blocks to filter under `settings.spacelift` + +### why + +- Debugging GitLab support specifically +- These settings should be defined under `settings.spacelift`, not as a top-level configuration + +### references + +- n/a + +
+ +## 1.288.0 (2023-08-18T15:12:16Z) + +
+ Placeholder for `upgrade-guide.md` @milldr (#823) + +### what + +- Added a placeholder file for `docs/upgrade-guide.md` with a basic explanation of what is to come + +### why + +- With #811 we moved the contents of this upgrade-guide file to the individual component. We plan to continue adding + upgrade guides for individual components, and in addition, create a higher-level upgrade guide here +- However, the build steps for refarch-scaffold expect `docs/upgrade-guide.md` to exist and are failing without it. We + need a placeholder until the `account-map`, etc changes are added to this file + +### references + +- Example of failing release: https://github.com/cloudposse/refarch-scaffold/actions/runs/5885022872 + +
+ +## 1.287.2 (2023-08-18T14:42:49Z) + +### πŸš€ Enhancements + +
+ update boolean logic @mcalhoun (#822) + +### what + +- Update the GuardDuty component to enable GuardDuty on the root account + +### why + +The API call to designate organization members now fails with the following if GuardDuty was not already enabled in the +organization management (root) account : + +``` +Error: error designating guardduty administrator account members: [{ +β”‚ AccountId: "111111111111, +β”‚ Result: "Operation failed because your organization master must first enable GuardDuty to be added as a member" +β”‚ }] +``` + +
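A rough sketch of the ordering the error message implies (resource layout is illustrative, not the component's actual code): the organization management (root) account needs its own detector before member accounts can be designated.

```hcl
# GuardDuty must be enabled in the management (root) account first.
resource "aws_guardduty_detector" "root" {
  enable = true
}

resource "aws_guardduty_organization_admin_account" "this" {
  admin_account_id = "111111111111" # placeholder delegated administrator account ID

  depends_on = [aws_guardduty_detector.root]
}
```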
+ +## 1.287.1 (2023-08-17T16:41:24Z) + +### πŸš€ Enhancements + +
+ chore: Remove unused + @MaxymVlasov (#818) + +# why + +``` +TFLint in components/terraform/eks/cluster/: +2 issue(s) found: + +Warning: [Fixable] local.identity_account_name is declared but not used (terraform_unused_declarations) + + on main.tf line 9: + 9: identity_account_name = module.iam_roles.identity_account_account_name + +Reference: https://github.com/terraform-linters/tflint-ruleset-terraform/blob/v0.4.0/docs/rules/terraform_unused_declarations.md + +Warning: [Fixable] variable "aws_teams_rbac" is declared but not used (terraform_unused_declarations) + + on variables.tf line 117: + 117: variable "aws_teams_rbac" { + +Reference: https://github.com/terraform-linters/tflint-ruleset-terraform/blob/v0.4.0/docs/rules/terraform_unused_declarations.md +``` + +
+ +## 1.287.0 (2023-08-17T15:52:57Z) + +
+ Update `remote-states` modules to the latest version @aknysh (#820) + +### what + +- Update `remote-states` modules to the latest version + +### why + +- `remote-state` version `1.5.0` uses the latest version of `terraform-provider-utils` which uses the latest version of + Atmos with many new features and improvements + +
+ +## 1.286.0 (2023-08-17T05:49:45Z) + +
+ Update cloudposse/utils/aws to 1.3.0 @RoseSecurity (#815) + +### What: + +- Updated the following to utilize the newest version of `cloudposse/utils/aws`: + +``` +0.8.1 modules/spa-s3-cloudfront +1.1.0 modules/aws-config +1.1.0 modules/datadog-configuration/modules/datadog_keys +1.1.0 modules/dns-delegated +``` + +### Why: + +- `cloudposse/utils/aws` components were not updated to `1.3.0` + +### References: + +- [AWS Utils](https://github.com/cloudposse/terraform-aws-utils/releases/tag/1.3.0) + +
+ +## 1.285.0 (2023-08-17T05:49:09Z) + +
+ Update api-gateway-account-settings README.md @johncblandii (#819) + +### what + +- Updated the title + +### why + +- It was an extra helping of copy/pasta + +### references + +
+ +## 1.284.0 (2023-08-17T02:10:47Z) + +
+ + Datadog upgrades @Nuru (#814) + +### what + +- Update Datadog components: + - `eks/datadog-agent` see `eks/datadog-agent/CHANGELOG.md` + - `datadog-configuration` better handling of `enabled = false` + - `datadog-integration` move "module count" back to "module" for better compatibility and maintainability, see + `datadog-integration/CHANGELOG.md` + - `datadog-lambda-forwarder` fix issues around `enable = false` and incomplete destruction of resources (particularly + log groups) see `datadog-lambda-forwarder/CHANGELOG.md` + - Cleanup `datadog-monitor` see `datadog-monitor/CHANGELOG.md` for details. Possible breaking change in that several + inputs have been removed, but they were previously ignored anyway, so no infrastructure change should result from + simply removing them from your inputs. + - Update `datadog-synthetics` dependency `remote-state` version + - `datadog-synthetics-private-location` migrate control of namespace to `helm-release` module. Possible destruction + and recreation of component on upgrade. See CHANGELOG.md + +### why + +- More reliable deployments, especially when destroying or disabling them +- Bug fixes and new features
+ +## 1.283.0 (2023-08-16T17:23:39Z) + +
+ Update EC2-Autoscale-Group Modules to 0.35.1 @RoseSecurity (#809) + +### What: + +- Updated `modules/spacelift/worker-pool` from 0.34.2 to 0.35.1 and adapted new variable features +- Updated `modules/bastion` from 0.35.0 to 0.35.1 +- Updated `modules/github-runners` from 0.35.0 to 0.35.1 + +### Why: + +- Modules were utilizing previous `ec2-autoscale-group` versions + +### References: + +- [terraform-aws-ec2-autoscale-group](https://github.com/cloudposse/terraform-aws-ec2-autoscale-group/blob/main/variables.tf) +- [Terraform Registry](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group#instance_refresh) + +
+ +
+ Update storage-class efs component documentation @max-lobur (#817) + +### what + +- Update storage-class efs component defaults + +### why + +- Follow component move outside of eks dir + +
+ +## 1.282.1 (2023-08-15T21:48:02Z) + +### πŸ› Bug Fixes + +
+ Karpenter bugfix, EKS add-ons to managed node group @Nuru (#816) + +### what + +- [eks/karpenter] use Instance Profile name from EKS output +- Clarify recommendation and fix defaults regarding deploying add-ons to managed node group + +### why + +- Bug fix: Karpenter did not work when legacy mode disabled +- Originally we expected to use Karpenter-only clusters and the documentation and defaults aligned with this. Now we + recommend all Add-Ons be deployed to a managed node group, but the defaults and documentation did not reflect this. + +
+ +## 1.282.0 (2023-08-14T16:05:08Z) + +
+ + Upstream the latest ecs-service module @goruha (#810) + +### what + +- Upstream the latest `ecs-service` component + +### why + +- Support ecspresso deployments +- Support s3 task definition mirroring +- Support external ALB/NLB components
+ +## 1.281.0 (2023-08-14T09:10:42Z) + +
+ Refactor Changelog @milldr (#811) + +### what + +- moved changelog for individual components +- changed title + +### why + +- Title changelogs consistently by components version +- Separate changes by affected components + +### references + +- https://github.com/cloudposse/knowledge-base/discussions/132 + +
+ +## 1.280.1 (2023-08-14T08:06:42Z) + +### πŸš€ Enhancements + +
+ Fix eks/cluster default values @Nuru (#813) + +### what + +- Fix eks/cluster `node_group_defaults` to default to legal (empty) values for `kubernetes_labels` and + `kubernetes_taints` +- Increase eks/cluster managed node group default disk size from 20 to 50 GB + +### why + +- Default values should be legal values or else they are not really defaults +- Nodes were running out of disk space just hosting daemon set pods at 20 GB + +
+ +## 1.280.0 (2023-08-11T20:13:45Z) + +
+ Updated ssm parameter versions @RoseSecurity (#812) + +### Why: + +- `cloudposse/ssm-parameter-store/aws` was out of date +- There are no new [changes](https://github.com/cloudposse/terraform-aws-ssm-parameter-store/releases/tag/0.11.0) + incorporated but just wanted to standardize new modules to updated version + +### What: + +- Updated the following to `v0.11.0`: + +``` +0.10.0 modules/argocd-repo +0.10.0 modules/aurora-mysql +0.10.0 modules/aurora-postgres +0.10.0 modules/datadog-configuration +0.10.0 modules/eks/platform +0.10.0 modules/opsgenie-team/modules/integration +0.10.0 modules/ses +0.9.1 modules/datadog-integration +``` + +
+ +## 1.279.0 (2023-08-11T16:39:01Z) + +
+ + fix: restore argocd notification ssm lookups @dudymas (#764) + +### what + +- revert some changes to `argocd` component +- connect argocd notifications with ssm secrets +- remove `deployment_id` from `argocd-repo` component +- correct `app_hostname` since gha usually adds protocol + +### why + +- regressions with argocd notifications caused github actions to time out +- `deployment_id` no longer needed for facilitating communication between gha and ArgoCD +- application urls were incorrect and problematic during troubleshooting
+ +## 1.278.0 (2023-08-09T21:54:09Z) + +
+ Upstream `eks/keda` @milldr (#808) + +### what + +- Added the component `eks/keda` + +### why + +- We've deployed KEDA for a few customers now and the component should be upstreamed + +### references + +- n/a + +
+ +## 1.277.0 (2023-08-09T20:39:21Z) + +
+ Added Inputs for `elasticsearch` and `cognito` @milldr (#786) + +### what + +- Added `deletion_protection` for `cognito` +- Added options for dedicated master for `elasticsearch` + +### why + +- Allow the default options to be customized + +### references + +- Customer requested additions + +
+ +## 1.276.1 (2023-08-09T20:30:36Z) + +
+ Update upgrade-guide.md Version @milldr (#807) + +### what + +- Set the version to the correct updated release + +### why + +- Needs to match correct version + +### references + +#804 + +
+ +### πŸš€ Enhancements + +
+ + feat: allow email to be configured at account level @sgtoj (#799) + +### what + +- allow email to be configured at account level + +### why + +- to allow importing existing accounts with an email address that does not meet the organization's standard naming format + +### references + +- n/a
+ +## 1.276.0 (2023-08-09T16:38:40Z) + +
+ + Transit Gateway Cross-Region Support @milldr (#804) + +### what + +- Upgraded `tgw` components to support cross-region connections +- Added back `tgw/cross-region-hub-connector` with an overhaul to support the updated `tgw/hub` component + +### why + +- Deploy `tgw/cross-region-hub-connector` to create peered TGW hubs +- Use `tgw/hub` for both in-region and cross-region connections + +### references + +- n/a
+ +## 1.275.0 (2023-08-09T02:53:39Z) + +
+ [eks/cluster] Proper handling of cold start and enabled=false @Nuru (#806) + +### what + +- Proper handling of cold start and `enabled=false` + +### why + +- Fixes #797 +- Supersedes and closes #798 +- Cloud Posse standard requires error-free operation and no resources created when `enabled` is `false`, but previously + this component had several errors + +
+ +## 1.274.2 (2023-08-09T00:13:36Z) + +### πŸš€ Enhancements + +
+ Added Enabled Parameter to aws-saml/okta-user and datadog-synthetics-private-location @RoseSecurity (#805) + +### What: + +- Added `enabled` parameter for `modules/aws-saml/modules/okta-user/main.tf` and + `modules/datadog-private-location-ecs/main.tf` + +### Why: + +- No support for disabling the creation of the resources + +
+ +## 1.274.1 (2023-08-09T00:11:55Z) + +### πŸš€ Enhancements + +
+ Updated Security Group Component to 2.2.0 @RoseSecurity (#803) + +### What: + +- Updated `bastion`, `redshift`, `rds`, `spacelift`, and `vpc` to utilize the newest version of + `cloudposse/security-group/aws` + +### Why: + +- `cloudposse/security-group/aws` components were not updated to `2.2.0` + +### References: + +- [AWS Security Group Component](https://github.com/cloudposse/terraform-aws-security-group/compare/2.0.0-rc1...2.2.0) + +
+ +## 1.274.0 (2023-08-08T17:03:41Z) + +
+ bug: update descriptions *_account_account_name variables @sgtoj (#801) + +### what + +- update descriptions `*_account_account_name` variables + - I replaced `stage` with `short` because that is the description used for the respective `outputs` entries + +### why + +- to help future implementers of CloudPosse's architectures + +### references + +- n/a + +
+ +## 1.273.0 (2023-08-08T17:01:23Z) + +
+ docs: fix issue with eks/cluster usage snippet @sgtoj (#796) + +### what + +- update usage snippet in readme for `eks/cluster` component + +### why + +- fix incorrect shape for one of the items in `aws_team_roles_rbac` +- improve consistency +- remove variables that are not applicable for the component + +### references + +- n/a + +
+ +## 1.272.0 (2023-08-08T17:00:32Z) + +
+ + feat: filter out β€œSUSPENDED” accounts for account-map @sgtoj (#800) + +### what + +- filter out β€œSUSPENDED” accounts (aka accounts in the waiting period for termination) for the `account-map` component + +### why + +- a suspended account cannot be used, so it should not exist in the account-map +- allows new _active_ accounts with the exact same name as a suspended account to exist and work with `account-map` + +### references + +- n/a
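The filtering can be pictured as a simple `for` expression (a sketch, not the component's exact code):

```hcl
data "aws_organizations_organization" "this" {}

locals {
  # Drop accounts pending closure so a new active account with the same name
  # can take their place in the account map.
  active_accounts = [
    for account in data.aws_organizations_organization.this.accounts :
    account if account.status != "SUSPENDED"
  ]
}
```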
+ +## 1.271.0 (2023-08-08T16:44:18Z) + +
+ `eks/karpenter` Readme.md update @Benbentwo (#792) + +### what + +- Adding Karpenter troubleshooting to readme +- Adding https://endoflife.date/amazon-eks to `EKS/Cluster` + +### references + +- https://karpenter.sh/docs/troubleshooting/ +- https://endoflife.date/amazon-eks + +
+ +## 1.270.0 (2023-08-07T21:54:49Z) + +
+ [eks/cluster] Add support for BottleRocket and EFS add-on @Nuru (#795) + +### what + +- Add support for EKS EFS add-on +- Better support for Managed Node Group's Block Device Storage +- Deprecate and ignore `aws_teams_rbac` and remove `identity` roles from `aws-auth` +- Support `eks/cluster` provisioning EC2 Instance Profile for Karpenter nodes (disabled by default via legacy flags) +- More options for specifying Availability Zones +- Deprecate `eks/ebs-controller` and `eks/efs-controller` +- Deprecate `eks/eks-without-spotinst` + +### why + +- Support EKS add-ons, follow-up to #723 +- Support BottleRocket, `gp3` storage, and provisioned iops and throughput +- Feature never worked +- Avoid specific failure mode when deleting and recreating an EKS cluster +- Maintain feature parity with `vpc` component +- Replace with add-ons +- Was not being maintained or used + +
+ +
+ [eks/storage-class] Initial implementation @Nuru (#794) + +### what + +- Initial implementation of `eks/storage-class` + +### why + +- Until now, we provisioned StorageClasses as a part of deploying + [eks/ebs-controller](https://github.com/cloudposse/terraform-aws-components/blob/ba309ab4ffa96169b2b8dadce0643d13c1bd3ae9/modules/eks/ebs-controller/main.tf#L20-L56) + and + [eks/efs-controller](https://github.com/cloudposse/terraform-aws-components/blob/ba309ab4ffa96169b2b8dadce0643d13c1bd3ae9/modules/eks/efs-controller/main.tf#L48-L60). + However, with the switch from deploying "self-managed" controllers to EKS add-ons, we no longer deploy + `eks/ebs-controller` or `eks/efs-controller`. Therefore, we need a new component to manage StorageClasses + independently of controllers. + +### references + +- #723 + +
+ +
+ [eks/karpenter] Script to update Karpenter CRDs @Nuru (#793) + +### what + +- [eks/karpenter] Script to update Karpenter CRDs + +### why + +- Upgrading Karpenter to v0.28.0 requires updating CRDs, which is not handled by current Helm chart. This script updates + them by modifying the existing CRDs to be labeled as being managed by Helm, then installing the `karpenter-crd` Helm + chart. + +### references + +- Karpenter [CRD Upgrades](https://karpenter.sh/docs/upgrade-guide/#custom-resource-definition-crd-upgrades) + +
+ +## 1.269.0 (2023-08-03T20:47:56Z) + +
+ + upstream `api-gateway` and `api-gateway-settings` @Benbentwo (#788) + +### what + +- Upstream api-gateway and its corresponding settings component
+ +## 1.268.0 (2023-08-01T05:04:37Z) + +
+ + Added new variable into `argocd-repo` component to configure ArgoCD's `ignore-differences` @zdmytriv (#785) + +### what + +- Added a new variable into the `argocd-repo` component to configure ArgoCD's `ignore-differences` + +### why + +- There are cases when application and/or third-party operators might want to change k8s API objects. For example, + change the number of replicas in a deployment. This will conflict with the ArgoCD application because the ArgoCD controller + will spot the drift and will try to bring the application back in sync with the codebase. + +### references + +- https://argo-cd.readthedocs.io/en/stable/user-guide/sync-options/#respect-ignore-difference-configs
+ +## 1.267.0 (2023-07-31T19:41:43Z) + +
+ Spacelift `admin-stack` `var.description` @milldr (#787) + +### what + +- added missing description option + +### why + +- Variable is defined, but never passed to the modules + +### references + +n/a + +
+ +## 1.266.0 (2023-07-29T18:00:25Z) + +
+ + Use s3_object_ownership variable @sjmiller609 (#779) + +### what + +- Pass the s3_object_ownership variable into the s3 module + +### why + +- I think it was accidentally not included +- Make it possible to disable the ACL from the stack config + +### references + +- https://github.com/cloudposse/terraform-aws-s3-bucket/releases/tag/3.1.0
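A minimal sketch of the pass-through (module source and variable name per the linked release; label/context wiring and other arguments omitted):

```hcl
variable "s3_object_ownership" {
  type        = string
  default     = "ObjectWriter"
  description = "Object ownership setting forwarded to the S3 bucket module (e.g. BucketOwnerEnforced to disable ACLs)"
}

module "s3_bucket" {
  source  = "cloudposse/s3-bucket/aws"
  version = "3.1.0"

  # Forward the stack-level setting instead of relying on the module default
  s3_object_ownership = var.s3_object_ownership
}
```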
+ +## 1.265.0 (2023-07-28T21:35:14Z) + +
+ + `bastion` support for `availability_zones` and public IP and subnets @milldr (#783) + +### what + +- Add support for `availability_zones` +- Fix issue with public IP and subnets +- `tflint` requirements -- removed all unused locals, variables, formatting + +### why + +- Not all instance types are available in all AZs in a region +- Bug fix + +### references + +- [Internal Slack reference](https://cloudposse.slack.com/archives/C048LCN8LKT/p1689085395494969)
+ +## 1.264.0 (2023-07-28T18:57:28Z) + +
+ + Aurora Resource Submodule Requirements @milldr (#775) + +### what + +- Removed the unnecessary requirement that the service name not equal the user name in the submodules of + both aurora resource components + +### why + +- This conditional doesn't add any value besides creating an unnecessary restriction. We should be able to use the + service name as the user name if we want + +### references + +- n/a
+ +## 1.263.0 (2023-07-28T18:12:30Z) + +
+ fix: restore notifications config in argocd @dudymas (#782) + +### what + +- Restore ssm configuration options for argocd notifications + +### why + +- notifications were not firing and tasks time out in some installations + +
+ +## 1.262.0 (2023-07-27T17:05:37Z) + +
+ Upstream `spa-s3-cloudfront` @milldr (#780) + +### what + +- Update module +- Add Cloudfront Invalidation permission to GitHub policy + +### why + +- Corrected bug in the module +- Allow GitHub Actions to run invalidations + +### references + +- https://github.com/cloudposse/terraform-aws-cloudfront-s3-cdn/pull/288 + +
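The added permission can be sketched as an IAM policy statement along these lines (the distribution ARN is a placeholder; the component derives it from the distribution it manages):

```hcl
data "aws_iam_policy_document" "github_actions_cloudfront" {
  statement {
    sid    = "AllowCloudFrontInvalidation"
    effect = "Allow"

    # Lets the GitHub Actions deployment role invalidate cached objects
    # after uploading a new build of the SPA.
    actions   = ["cloudfront:CreateInvalidation"]
    resources = ["arn:aws:cloudfront::111111111111:distribution/EXAMPLE123"]
  }
}
```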
+ +## 1.261.0 (2023-07-26T16:20:37Z) + +
+ Upstream `spa-s3-cloudfront` @milldr (#778) + +### what + +- Upstream changes to `spa-s3-cloudfront` + +### why + +- Updated the included modules to support Terraform v5 +- Handle disabled WAF from remote-state + +### references + +- https://github.com/cloudposse/terraform-aws-cloudfront-s3-cdn/pull/284 + +
+ +## 1.260.1 (2023-07-25T05:10:20Z) + +### πŸš€ Enhancements + +
+ [vpc] bugfix, [aurora-postgres] & [cloudtrail-bucket] Tflint fixes @Nuru (#776) + +### what + +- [vpc]: disable vpc_endpoints when enabled = false +- [aurora-postgres]: ensure variables have explicit types +- [cloudtrail-bucket]: ensure variables have explicit types + +### why + +- bugfix +- tflint fix +- tflint fix + +
+ +### πŸ› Bug Fixes + +
+ [vpc] bugfix, [aurora-postgres] & [cloudtrail-bucket] Tflint fixes @Nuru (#776) + +### what + +- [vpc]: disable vpc_endpoints when enabled = false +- [aurora-postgres]: ensure variables have explicit types +- [cloudtrail-bucket]: ensure variables have explicit types + +### why + +- bugfix +- tflint fix +- tflint fix + +
+ +## 1.260.0 (2023-07-23T23:08:53Z) + +
+ Update `alb` component @aknysh (#773) + +### what + +- Update `alb` component + +### why + +- Fixes after provisioning and testing on AWS + +
+ +## 1.259.0 (2023-07-20T04:32:13Z) + +
+ `elasticsearch` DNS Component Lookup @milldr (#769) + +### what + +- add environment for `dns-delegated` component lookup + +### why + +- `elasticsearch` is deployed in a regional environment, but `dns-delegated` is deployed to `gbl` + +### references + +- n/a + +
+ +## 1.258.0 (2023-07-20T04:17:31Z) + +
+ Bump `lambda-elasticsearch-cleanup` module @milldr (#768) + +### what + +- bump version of `lambda-elasticsearch-cleanup` module + +### why + +- Support Terraform provider v5 + +### references + +- https://github.com/cloudposse/terraform-aws-lambda-elasticsearch-cleanup/pull/48 + +
+ +## 1.257.0 (2023-07-20T03:04:51Z) + +
+ Bump ECS cluster module @max-lobur (#752) + +### what + +- Update ECS cluster module + +### why + +- Maintenance + +
+ +## 1.256.0 (2023-07-18T23:57:44Z) + +
+ Bump `elasticache-redis` Module @milldr (#767) + +### what + +- Bump `elasticache-redis` module + +### why + +- Resolve issues with terraform provider v5 + +### references + +- https://github.com/cloudposse/terraform-aws-elasticache-redis/issues/199 + +
+ +## 1.255.0 (2023-07-18T22:53:51Z) + +
+ Aurora Postgres Enhanced Monitoring Input @milldr (#766) + +### what + +- Added `enhanced_monitoring_attributes` as option +- Set default `aurora-mysql` component name + +### why + +- Set this var with a custom value to avoid IAM role length restrictions (default unchanged) +- Set common value as default + +### references + +- n/a + +
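+A hedged sketch of setting the new input in a stack; the value shown is arbitrary, and the list-of-strings shape
+follows the usual null-label `attributes` convention, which is an assumption here:
+
+```yaml
+components:
+  terraform:
+    aurora-postgres:                            # component instance name is an assumption
+      vars:
+        # A short attribute keeps the derived enhanced-monitoring IAM role name under the length limit
+        enhanced_monitoring_attributes: ["em"]
+```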
+ +## 1.254.0 (2023-07-18T21:00:30Z) + +
+ feat: acm no longer requires zone @dudymas (#765) + +### what + +- `acm` only looks up zones if `process_domain_validation_options` is true + +### why + +- Allow external validation of acm certs + +
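+A minimal sketch of opting out of in-component validation via a stack; only `process_domain_validation_options` is
+taken from this change, the surrounding layout is assumed:
+
+```yaml
+components:
+  terraform:
+    acm:
+      vars:
+        # Skip the zone lookup and DNS validation records; validate the certificate externally
+        process_domain_validation_options: false
+```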
+ +## 1.253.0 (2023-07-18T17:45:16Z) + +
+ `alb` and `ssm-parameters` Upstream for Basic Use @milldr (#763) + +### what + +- `alb` component can get the ACM cert from either `dns-delegated` or `acm` +- Support deploying `ssm-parameters` without SOPS +- `waf` requires a value for `visibility_config` in the stack catalog + +### why + +- resolving bugs while deploying example components + +### references + +- https://cloudposse.atlassian.net/browse/JUMPSTART-1185 + +
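+Since `visibility_config` now needs a value in the stack catalog, here is a hedged example; the fields mirror the AWS
+provider's `visibility_config` block, and the exact variable schema in the component is assumed to match:
+
+```yaml
+components:
+  terraform:
+    waf:
+      vars:
+        visibility_config:
+          cloudwatch_metrics_enabled: true
+          metric_name: default
+          sampled_requests_enabled: false
+```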
+ +## 1.252.0 (2023-07-18T16:14:23Z) + +
+ fix: argocd flags, versions, and expressions @dudymas (#753) + +### what + +- adjust expressions in argocd +- update helmchart module +- tidy up variables + +### why + +- component wouldn't run + +
+ +## 1.251.0 (2023-07-15T03:47:29Z) + +
+ fix: ecs capacity provider typing @dudymas (#762) + +### what + +- Adjust typing of `capacity_providers_ec2` + +### why + +- Component doesn't work without these fixes + +
+ +## 1.250.3 (2023-07-15T00:31:40Z) + +### πŸš€ Enhancements + +
+ Update `alb` and `eks/alb-controller` components @aknysh (#760) + +### what + +- Update `alb` and `eks/alb-controller` components + +### why + +- Remove unused variables and locals +- Apply variables that are defined in `variables.tf` but were not used + +
+ +## 1.250.2 (2023-07-14T23:34:14Z) + +### πŸš€ Enhancements + +
+ [aws-teams] Remove obsolete restriction on assuming roles in identity account @Nuru (#761) + +### what + +- [aws-teams] Remove obsolete restriction on assuming roles in the `identity` account + +### why + +Some time ago, there was an implied permission for any IAM role to assume any other IAM role in the same account if the +originating role had sufficient permissions to perform `sts:AssumeRole`. For this reason, we had an explicit policy +against assuming roles in the `identity` account. + +AWS has removed that implied permission and now requires all roles to have explicit trust policies. Our current Team +structure requires Teams (e.g. `spacelift`) to be able to assume roles in `identity` (e.g. `planner`). Therefore, the +previous restriction is both not needed and actually hinders desired operation. + +
+ +### πŸ› Bug Fixes + +
+ [aws-teams] Remove obsolete restriction on assuming roles in identity account @Nuru (#761) + +### what + +- [aws-teams] Remove obsolete restriction on assuming roles in the `identity` account + +### why + +Some time ago, there was an implied permission for any IAM role to assume any other IAM role in the same account if the +originating role had sufficient permissions to perform `sts:AssumeRole`. For this reason, we had an explicit policy +against assuming roles in the `identity` account. + +AWS has removed that implied permission and now requires all roles to have explicit trust policies. Our current Team +structure requires Teams (e.g. `spacelift`) to be able to assume roles in `identity` (e.g. `planner`). Therefore, the +previous restriction is both not needed and actually hinders desired operation. + +
+ +## 1.250.1 (2023-07-14T02:14:46Z) + +### πŸš€ Enhancements + +
+ [eks/karpenter-provisioner] minor improvements @Nuru (#759) + +### what + +- [eks/karpenter-provisioner]: + - Implement `metadata_options` + - Avoid Terraform errors by marking the Provisioner `spec.requirements` as a computed field + - Add explicit error message about Consolidation and TTL Seconds After Empty being mutually exclusive + - Add `instance-category` and `instance-generation` to example in README + - Make many inputs optional +- [eks/karpenter] Update README to indicate that version 0.19 or later of Karpenter is required to work with this code. + +### why + +- Bug fix: the input was there, but was being ignored, leading to unexpected behavior +- If a requirement that had a default value was not supplied, Terraform would fail with an error about inconsistent + plans because Karpenter would fill in the default +- Show some default values and how to override them +- Reduce the burden of supplying empty fields + +
+ +## 1.250.0 (2023-07-14T02:10:46Z) + +
+ Add EKS addons and the required IRSA to the `eks` component @aknysh (#723) + +### what + +- Deprecate the `eks-iam` component +- Add EKS addons and the required IRSA for the addons to the `eks` component +- Add ability to specify configuration values and timeouts for addons +- Add ability to deploy addons to Fargate when necessary +- Add ability to omit specifying Availability Zones and infer them from private subnets +- Add a recommended but optional (opt-in) feature: use a single Fargate Pod Execution Role for all Fargate Profiles + +### why + +- The `eks-iam` component is not in use (we now create the IAM roles for Kubernetes Service Accounts in the + https://github.com/cloudposse/terraform-aws-helm-release module), and has very old and outdated code + +- AWS recommends provisioning the required EKS addons rather than relying on the managed addons (some of which are + automatically provisioned by EKS on a cluster) + +- Some EKS addons (e.g. `vpc-cni` and `aws-ebs-csi-driver`) require an IAM Role for Kubernetes Service Account (IRSA) + with specific permissions. Since these addons are critical for cluster functionality, we create the IRSA roles for the + addons in the `eks` component and provide the role ARNs to the addons + +- Some EKS addons can be configured. In particular, `coredns` requires configuration to enable it to be deployed to + Fargate. + +- Users relying on Karpenter to deploy all nodes and wanting to deploy `coredns` or `aws-ebs-csi-driver` addons need to + deploy them to Fargate or else the EKS deployment will fail. + +- Enable DRY specification of Availability Zones, and use of AZ IDs, by reading the VPC's AZs. + +- A cluster needs only one Fargate Pod Execution Role, and it was a mistake to provision one for every profile. However, + making the change would break existing clusters, so it is optional and requires opt-in. + +### references + +- https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html +- https://docs.aws.amazon.com/eks/latest/userguide/managing-add-ons.html#creating-an-add-on +- https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html +- https://docs.aws.amazon.com/eks/latest/userguide/managing-vpc-cni.html +- https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-role +- https://aws.github.io/aws-eks-best-practices/networking/vpc-cni/#deploy-vpc-cni-managed-add-on +- https://docs.aws.amazon.com/eks/latest/userguide/csi-iam-role.html +- https://aws.amazon.com/blogs/containers/amazon-ebs-csi-driver-is-now-generally-available-in-amazon-eks-add-ons +- https://docs.aws.amazon.com/eks/latest/userguide/managing-ebs-csi.html#csi-iam-role +- https://github.com/kubernetes-sigs/aws-ebs-csi-driver + +
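+A rough, hypothetical sketch of how addons might be specified in a stack; the exact schema (key names, version
+pinning, Fargate and IRSA wiring) is defined by the component's `variables.tf` and is not reproduced here, so treat
+every key below as an assumption:
+
+```yaml
+components:
+  terraform:
+    eks/cluster:
+      vars:
+        addons:                            # hypothetical variable name and shape
+          - addon_name: vpc-cni            # needs an IRSA role, created by the component
+          - addon_name: coredns            # needs extra configuration to run on Fargate-only clusters
+          - addon_name: aws-ebs-csi-driver # needs an IRSA role, created by the component
+```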
+ +## 1.249.0 (2023-07-14T01:23:37Z) + +
+ Make alb-controller default Ingress actually the default Ingress @Nuru (#758) + +### what + +- Make the `alb-controller` default Ingress actually the default Ingress + +### why + +- When setting `default_ingress_enabled = true` it is a reasonable expectation that the deployed Ingress be marked as + the Default Ingress. The previous code suggests this was the intended behavior, but does not work with the current + Helm chart and may have never worked. + +
+ +## 1.248.0 (2023-07-13T00:21:29Z) + +
+ Upstream `gitops` Policy Update @milldr (#757) + +### what + +- allow actions on table resources + +### why + +- required to be able to query using a global secondary index + +### references + +- https://github.com/cloudposse/github-action-terraform-plan-storage/pull/16 + +
+ +## 1.247.0 (2023-07-12T19:32:33Z) + +
+ Update `waf` and `alb` components @aknysh (#755) + +### what + +- Update `waf` component +- Update `alb` component + +### why + +- For `waf` component, add missing features supported by the following resources: + + - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl + - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl_logging_configuration + +- For `waf` component, remove deprecated features not supported by Terraform `aws` provider v5: + + - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/guides/version-5-upgrade#resourceaws_wafv2_web_acl + - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/guides/version-5-upgrade#resourceaws_wafv2_web_acl_logging_configuration + +- For `waf` component, allow specifying a list of Atmos components to read from the remote state and associate their + ARNs with the web ACL + +- For `alb` component, update the modules to the latest versions and allow specifying Atmos component names for the + remote state in the variables (for the cases where the Atmos component names are not standard) + +### references + +- https://github.com/cloudposse/terraform-aws-waf/pull/45 + +
+ +## 1.246.0 (2023-07-12T18:57:58Z) + +
+ `acm` Upstream @Benbentwo (#756) + +### what + +- Upstream ACM + +### why + +- New Variables + - `subject_alternative_names_prefixes` + - `domain_name_prefix` + +
+ +## 1.245.0 (2023-07-11T19:36:11Z) + +
+ Bump `spaces` module versions @milldr (#754) + +### what + +- bumped module version for `terraform-spacelift-cloud-infrastructure-automation` + +### why + +- New policy added to `spaces` + +### references + +- https://github.com/cloudposse/terraform-spacelift-cloud-infrastructure-automation/releases/tag/1.1.0 + +
+ +## 1.244.0 (2023-07-11T17:50:19Z) + +
+ Upstream Spacelift and Documentation @milldr (#732) + +### what + +- Minor corrections to spacelift components +- Documentation + +### why + +- Deployed this at a customer and resolved the errors encountered +- Adding documentation for updated Spacelift design + +### references + +- n/a + +
+ +## 1.243.0 (2023-07-06T20:04:08Z) + +
+ Upstream `gitops` @milldr (#735) + +### what + +- Upstream new component, `gitops` + +### why + +- This component is used to create a role for GitHub to assume. This role is used to assume the `gitops` team and is + required for enabling GitHub Action Terraform workflows + +### references + +- JUMPSTART-904 + +
+ +## 1.242.1 (2023-07-05T19:46:08Z) + +### πŸš€ Enhancements + +
+ Use the new subnets data source @max-lobur (#737) + +### what + +- Use the new subnets data source + +### why + +- Planned migration according to https://github.com/hashicorp/terraform-provider-aws/pull/18803 + +
+ +## 1.242.0 (2023-07-05T17:05:57Z) + +
+ Restore backwards compatibility of account-map output @Nuru (#748) + +### what + +- Restore backwards compatibility of `account-map` output + +### why + +- PR #715 removed outputs from `account-map` that `iam-roles` relied on. Although it removed the references in + `iam-roles`, this imposed an ordering on the upgrade: the `iam-roles` code had to be deployed before the module could + be applied. That proved to be inconvenient. Furthermore, if a future `account-map` upgrade added outputs that + `iam-roles` required, neither order of operations would go smoothly. With this update, the standard practice of applying + `account-map` before deploying code will work again. + +
+ +## 1.241.0 (2023-07-05T16:52:58Z) + +
+ Fixed broken links in READMEs @zdmytriv (#749) + +### what + +- Fixed broken links in READMEs + +### why + +- Fixed broken links in READMEs + +### references + +- https://github.com/cloudposse/terraform-aws-components/issues/747 + +
+ +## 1.240.1 (2023-07-04T04:54:28Z) + +### Upgrade notes + +This fixes issues with `aws-sso` and `github-oidc-provider`. Versions from v1.227 through v1.240 should not be used. + +After installing this version of `aws-sso`, you may need to change the configuration in your stacks. See +[modules/aws-sso/changelog](https://github.com/cloudposse/terraform-aws-components/blob/main/modules/aws-sso/CHANGELOG.md) +for more information. Note: this release is from PR #740 + +After installing this version of `github-oidc-provider`, you may need to change the configuration in your stacks. See +the release notes for v1.238.1 for more information. + +### πŸ› Bug Fixes + +
+ bugfix `aws-sso`, `github-oidc-provider` @Benbentwo (#740) + +### what + +- Bugfixes `filter` deprecation issue via module update to `1.1.1` +- Bugfixes missing `aws.root` provider +- Bugfixes `github-oidc-provider` v1.238.1 + +### why + +- Bugfixes + +### references + +- https://github.com/cloudposse/terraform-aws-sso/pull/44 +- closes #744 + +
+ +## 1.240.0 (2023-07-03T18:14:14Z) + +
+ Fix TFLint violations in account-map @MaxymVlasov (#745) + +### Why + +I'm too lazy to fix it each time when we get module updates via `atmos vendor` GHA + +### References + +- https://github.com/terraform-linters/tflint-ruleset-terraform/blob/v0.4.0/docs/rules/terraform_deprecated_index.md +- https://github.com/terraform-linters/tflint-ruleset-terraform/blob/v0.4.0/docs/rules/terraform_comment_syntax.md +- https://github.com/terraform-linters/tflint-ruleset-terraform/blob/v0.4.0/docs/rules/terraform_unused_declarations.md + +
+ +## 1.239.0 (2023-06-29T23:34:53Z) + +
+ Bump `cloudposse/ec2-autoscale-group/aws` to `0.35.0` @milldr (#734) + +### what + +- bumped ASG module version, `cloudposse/ec2-autoscale-group/aws` to `0.35.0` + +### why + +- Recent versions of this module resolve errors for these components + +### references + +- https://github.com/cloudposse/terraform-aws-ec2-autoscale-group + +
+ +## 1.238.1 (2023-06-29T21:15:50Z) + +### Upgrade notes + +There is a bug in this version of `github-oidc-provider`. Upgrade to version v1.240.1 or later instead. + +After installing this version of `github-oidc-provider`, you may need to change the configuration in your stacks. + +- If you have dynamic Terraform roles enabled, then this should be configured like a normal component. The previous + component may have required you to set + + ```yaml + backend: + s3: + role_arn: null + ``` + + and **that configuration should be removed** everywhere. + +- If you only use SuperAdmin to deploy things to the `identity` account, then for the `identity` (and `root`, if + applicable) account **_only_**, set + + ```yaml + backend: + s3: + role_arn: null + vars: + superadmin: true + ``` + + **Deployments to other accounts should not have any of those settings**. + +### πŸš€ Enhancements + +
+ [github-oidc-provider] extra-compatible provider @Nuru (#742) + +### what && why + +- This updates `provider.tf` to provide compatibility with various legacy configurations as well as the current + reference architecture +- This update does NOT require updating `account-map` + +
+ +## 1.238.0 (2023-06-29T19:39:15Z) + +
+ IAM upgrades: SSO Permission Sets as Teams, SourceIdentity support, region independence @Nuru (#738) + +### what + +- Enable SSO Permission Sets to function as teams +- Allow SAML sign on via any regional endpoint, not only us-east-1 +- Allow use of AWS "Source Identity" for SAML and SSO users (not enabled for OIDC) + +### why + +- Reduce the friction between SSO permission sets and SAML roles by allowing people to use either interchangeably. + (Almost. SSO permission sets do not yet have the same permissions as SAML roles in the `identity` account itself.) +- Enable continued access in the event of a regional outage in us-east-1 as happened recently +- Enable auditing of who is using assumed roles + +### References + +- [Monitor and control actions taken with assumed roles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) +- [How to integrate AWS STS SourceIdentity with your identity provider](https://aws.amazon.com/blogs/security/how-to-integrate-aws-sts-sourceidentity-with-your-identity-provider/) +- [AWS Sign-In endpoints](https://docs.aws.amazon.com/general/latest/gr/signin-service.html) +- [Available keys for SAML-based AWS STS federation](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#condition-keys-saml) + +### Upgrade notes + +The regional endpoints and Source Identity support are non-controversial and cannot be disabled. They do, however, +require running `terraform apply` against `aws-saml`, `aws-teams`, and `aws-team-roles` in all accounts. + +#### AWS SSO updates + +To enable SSO Permission Sets to function as teams, you need to update `account-map` and `aws-sso`, then apply changes +to + +- `tfstate-backend` +- `aws-teams` +- `aws-team-roles` +- `aws-sso` + +This is all enabled by default. If you do not want it, you only need to update `account-map`, and add +`account-map/modules/roles-to-principles/variables_override.tf` in which you set +`overridable_team_permission_sets_enabled` to default to `false` + +Under the old `iam-primary-roles` component, corresponding permission sets were named `IdentityRoleAccess`. Under +the current `aws-teams` component, they are named `IdentityTeamAccess`. The current `account-map` defaults to the +latter convention. To use the earlier convention, add `account-map/modules/roles-to-principles/variables_override.tf` in +which you set `overridable_team_permission_set_name_pattern` to default to `"Identity%sRoleAccess"` + +There is a chance the resulting trust policies will be too big, especially for `tfstate-backend`. If you get an error +like + +``` +Cannot exceed quota for ACLSizePerRole: 2048 +``` + +You need to request a quota increase (Quota Code L-C07B4B0D), which will be automatically granted, usually in about 5 +minutes. The max quota is 4096, but we recommend increasing it to 3072 first, so you retain some breathing room for the +future. + +
+ +## 1.237.0 (2023-06-27T22:27:49Z) + +
+ Add Missing `github-oidc-provider` Thumbprint @milldr (#736) + +### what + +- include both thumbprints for GitHub OIDC + +### why + +- There are two possible intermediary certificates for the Actions SSL certificate and either can be returned by + GitHub's servers, requiring customers to trust both. This is a known behavior when the intermediary certificates are + cross-signed by the CA. + +### references + +- https://github.blog/changelog/2023-06-27-github-actions-update-on-oidc-integration-with-aws/ + +
+ +## 1.236.0 (2023-06-26T18:14:29Z) + +
+ Update `eks/echo-server` and `eks/alb-controller-ingress-group` components @aknysh (#733) + +### what + +- Update `eks/echo-server` and `eks/alb-controller-ingress-group` components +- Allow specifying + [alb.ingress.kubernetes.io/scheme](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/ingress/annotations/#scheme) + (`internal` or `internet-facing`) + +### why + +- Allow the echo server to work with internal load balancers + +### references + +- https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/ingress/annotations/ + +
+ +## 1.235.0 (2023-06-22T21:06:18Z) + +
+ [account-map] Backwards compatibility for terraform profile users and eks/cluster @Nuru (#731) + +### what + +- [account-map/modules/iam-roles] Add `profiles_enabled` input to override global value +- [eks/cluster] Use `iam-roles` `profiles_enabled` input to force getting a role ARN even when profiles are in use +- [guardduty] Make providers compatible with static and dynamic TF roles + +### why + +- Previously, when the global `account-map` `profiles_enabled` flag was `true`, `iam_roles.terraform_role_arn` would be + null. However, `eks/cluster` requires `terraform_role_arn` regardless. +- Changes made in #728 work in environments that have not adopted dynamic Terraform roles but would fail in environments + that have (when using SuperAdmin) + +
+ +## 1.234.0 (2023-06-21T22:44:55Z) + +
+ [account-map] Feature flag to enable legacy Terraform role mapping @Nuru (#730) + +### what + +- [account-map] Add `legacy_terraform_uses_admin` feature flag to retain backwards compatibility + +### why + +- Historically, the `terraform` roles in `root` and `identity` were not used for Terraform plan/apply, but for other + things, and so the `terraform_roles` map output selected the `admin` roles for those accounts. This "wart" has been + removed in the current `aws-team-roles` and `tfstate-backend` configurations, but for people who do not want to migrate to + the new conventions, this feature flag enables them to maintain the status quo with respect to role usage while taking + advantage of other updates to `account-map` and other components. + +### references + +This update is recommended for all customers wanting to use **_any_** component version 1.227 or later. + +- #715 + +
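+A minimal sketch of opting into the legacy behavior via a stack, assuming the flag is set directly on the
+`account-map` component as described above:
+
+```yaml
+components:
+  terraform:
+    account-map:
+      vars:
+        # Keep mapping the `terraform` role in `root`/`identity` to the `admin` roles (legacy behavior)
+        legacy_terraform_uses_admin: true
+```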
+ +## 1.233.0 (2023-06-21T20:03:36Z) + +
+ [lambda] feat: allows to use YAML instead of JSON for IAM policy @gberenice (#692) + +### what + +- BREAKING CHANGE: Actually use variable `function_name` to set the lambda function name. +- Make the variable `function_name` optional. When not set, the old null-label-derived name will be used. +- Allow IAM policy to be specified in a custom terraform object as an alternative to JSON. + +### why + +- `function_name` was required, but it wasn't actually passed to `module "lambda"` inputs. +- Allow callers to stop providing `function_name` and preserve the old behavior of using the automatically generated name. +- When using [Atmos](https://atmos.tools/) to generate inputs from "stack" YAML files, having the ability to pass the + statements in as a custom object means specifying them via YAML, which makes the policy declaration in the stack more + readable compared to embedding a JSON string in the YAML. + +
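+A hedged sketch of the readability win described above; `function_name` comes from this change, while the policy
+variable name and the statement object keys are hypothetical stand-ins for whatever the component actually defines:
+
+```yaml
+components:
+  terraform:
+    lambda:
+      vars:
+        function_name: image-resizer                      # now actually applied to the function
+        iam_policy:                                        # hypothetical variable name
+          statements:                                      # hypothetical object shape
+            - sid: AllowBucketRead
+              effect: Allow
+              actions: ["s3:GetObject"]
+              resources: ["arn:aws:s3:::example-bucket/*"]
+```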
+ +## 1.232.0 (2023-06-21T15:49:06Z) + +
+ refactor securityhub component @mcalhoun (#728) + +### what + +- Refactor the Security Hub components into a single component + +### why + +- To improve the overall dev experience and to prevent needing to do multiple deploys with variable changes in-between. + +
+ +## 1.231.0 (2023-06-21T14:54:50Z) + +
+ roll guard duty back to previous providers logic @mcalhoun (#727) + +### what + +- Roll the Guard Duty component back to using the previous logic for role assumption. + +### why + +- The newer method is causing the provider to try to assume the role twice. We get the error: + +``` +AWS Error: operation error STS: AssumeRole, https response error StatusCode: 403, RequestID: 00000000-0000-0000-0000-00000000, api error AccessDenied: User: arn:aws:sts::000000000000:assumed-role/acme-core-gbl-security-terraform/aws-go-sdk-1687312396297825294 is not authorized to perform: sts:AssumeRole on resource: arn:aws:iam::000000000000:role/acme-core-gbl-security-terraform +``` + +
+ +## 1.230.0 (2023-06-21T01:49:52Z) + +
+ refactor guardduty module @mcalhoun (#725) + +### what + +- Refactor the GuardDuty components into a single component + +### why + +- To improve the overall dev experience and to prevent needing to do multiple deploys with variable changes in-between. + +
+ +## 1.229.0 (2023-06-20T19:37:35Z) + +
+ upstream `github-action-runners` dockerhub authentication @Benbentwo (#726) + +### what + +- Adds support for dockerhub authentication + +### why + +- Dockerhub limits are unrealistically low for actually using dockerhub as an image registry for automated builds + +
+ +## 1.228.0 (2023-06-15T20:57:45Z) + +
+ alb: use the https_ssl_policy @johncblandii (#722) + +### what + +- Apply the HTTPS policy + +### why + +- The policy was unused so it was defaulting to an old policy + +### references + +
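+A minimal sketch of pinning the policy in a stack; `https_ssl_policy` comes from this change, and the value shown is
+one of AWS's predefined ELB security policies:
+
+```yaml
+components:
+  terraform:
+    alb:
+      vars:
+        https_ssl_policy: ELBSecurityPolicy-TLS13-1-2-2021-06  # now actually applied to the HTTPS listener
+```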
+ +## 1.227.0 (2023-06-12T23:41:45Z) + +Possibly breaking change: + +In this update, `account-map/modules/iam-roles` acquired a provider, making it no longer able to be used with `count`. +If you have code like + +```hcl +module "optional_role" { + count = local.optional_role_enabled ? 1 : 0 + + source = "../account-map/modules/iam-roles" + stage = var.optional_role_stage + context = module.this.context +} +``` + +you will need to rewrite it, removing the `count` parameter. It will be fine to always instantiate the module. If there +are problems ensuring appropriate settings when the module is disabled, you can always replace them with the +component's inputs: + +```hcl +module "optional_role" { + source = "../account-map/modules/iam-roles" + stage = local.optional_role_enabled ? var.optional_role_stage : var.stage + context = module.this.context +} +``` + +The update to components 1.227.0 is huge, and you have options. + +- Enable, or not, dynamic Terraform IAM roles, which allow you to give some people (and Spacelift) the ability to run + Terraform plan in some accounts without allowing apply. Note that these users will still have read/write access to + Terraform state, but will not have IAM permissions to make changes in accounts. + [terraform_dynamic_role_enabled](https://github.com/cloudposse/terraform-aws-components/blob/1b338fe664e5debc5bbac30cfe42003f7458575a/modules/account-map/variables.tf#L96-L100) +- Update to new `aws-teams` team names. The new names are (except for support) distinct from team-roles, making it + easier to keep track. Also, the new managers team can run Terraform for identity and root in most (but not all) cases. +- Update to new `aws-team-roles`, including new permissions. The custom policies that have been removed are replaced in + the `aws-team-roles` configuration with AWS managed policy ARNs. This is required to add the `planner` role and + support the `terraform plan` restriction. +- Update the `providers.tf` for all components. Or some of them now, some later. Most components do not require updates, + but all of them have updates. The new `providers.tf`, when used with dynamic Terraform roles, allows users directly + logged into target accounts (rather than having roles in the `identity` account) to use Terraform in that account, and + also allows SuperAdmin to run Terraform in more cases (almost everywhere). + +**If you do not want any new features**, you only need to update `account-map` to v1.235 or later, to be compatible with +future components. Note that when updating `account-map` this way, you should update the code everywhere (all open PRs +and branches) before applying the Terraform changes, because the applied changes break the old code. + +If you want all the new features, we recommend updating all of the following to the current release in 1 PR: + +- account-map +- aws-teams +- aws-team-roles +- tfstate-backend + +
+ Enable `terraform plan` access via dynamic Terraform roles @Nuru (#715) + +### Reviewers, please note: + +The PR changes a lot of files. In particular, the `providers.tf` and therefore the `README.md` for nearly every +component. Therefore it will likely be easier to review this PR one commit at a time. + +`import_role_arn` and `import_profile_name` have been removed as they are no longer needed. Current versions of +Terraform (probably beginning with v1.1.0, but maybe as late as 1.3.0, I have not found authoritative information) can +read data sources during plan and so no longer need a role to be explicitly specified while importing. Feel free to +perform your own tests to make yourself more comfortable that this is correct. + +### what + +- Updates to allow Terraform to dynamically assume a role based on the user, to allow some users to run `terraform plan` + but not `terraform apply` + - Deploy standard `providers.tf` to all components that need an `aws` provider + - Move extra provider configurations to separate file, so that `providers.tf` can remain consistent/identical among + components and thus be easily updated + - Create `provider-awsutils.mixin.tf` to provide consistent, maintainable implementation +- Make `aws-sso` vendor safe +- Deprecate `sso` module in favor of `aws-saml` + +### why + +- Allow users to try new code or updated configurations by running `terraform plan` without giving them permission to + make changes with Terraform +- Make it easier for people directly logged into target accounts to still run Terraform +- Follow-up to #697, which updated `aws-teams` and `aws-team-roles`, to make `aws-sso` consistent +- Reduce confusion by moving deprecated code to `deprecated/` + +
+ +## 1.226.0 (2023-06-12T17:42:51Z) + +
+ chore: Update and add more basic pre-commit hooks @MaxymVlasov (#714) + +### what + +Fix common issues in the repo + +### why + +The repo violates our basic checks, which adds a headache to using +https://github.com/cloudposse/github-action-atmos-component-updater as is + +![image](https://github.com/cloudposse/terraform-aws-components/assets/11096782/248febbe-b65f-4080-8078-376ef576b457) + +> **Note**: It is much simpler to review this PR with +> [hide whitespace changes](https://github.com/cloudposse/terraform-aws-components/pull/714/files?w=1) enabled + +
+ +## 1.225.0 (2023-06-12T14:57:20Z) + +
+ Removed list of components from main README.md @zdmytriv (#721) + +### what + +- Removed list of components from main README.md + +### why + +- That list is outdated + +### references + +
+ +## 1.224.0 (2023-06-09T19:52:51Z) + +
+ upstream argocd @Benbentwo (#634) + +### what + +- Upstream fixes that allow for Google OIDC + +
+ +## 1.223.0 (2023-06-09T14:28:08Z) + +
+ add new spacelift components @mcalhoun (#717) + +### what + +- Add the newly developed spacelift components +- Deprecate the previous components + +### why + +- We undertook a process of decomposing a monolithic module and broke it into smaller, composable pieces for a better + developer experience + +### references + +- Corresponding + [Upstream Module PR](https://github.com/cloudposse/terraform-spacelift-cloud-infrastructure-automation/pull/143) + +
+ +## 1.222.0 (2023-06-08T23:28:34Z) + +
+ Karpenter Node Interruption Handler @milldr (#713) + +### what + +- Added the Karpenter Interruption Handler to the existing component + +### why + +- Interruption handling is supported by Karpenter, but we need to deploy an SQS queue and EventBridge rules to enable it + +### references + +- https://github.com/cloudposse/knowledge-base/discussions/127 + +
+ +## 1.221.0 (2023-06-07T18:11:23Z) + +
+ feat: New Component `aws-ssosync` @dudymas (#625) + +### what + +- adds a fork of [aws-ssosync](https://github.com/awslabs/ssosync) as a lambda on a 15m cronjob + +### Why + +Google is one of those identity providers that doesn't have good integration with AWS SSO. In order to sync groups and +users across, we need to use some API calls; luckily, AWS built [aws-ssosync](https://github.com/awslabs/ssosync) to +handle that. + +Unfortunately, it required ASM, so we use [Benbentwo/ssosync](https://github.com/Benbentwo/ssosync) as it removes that +requirement. + +
+ +## 1.220.0 (2023-06-05T22:31:10Z) + +
+ Disable helm experiments by default, block Kubernetes provider 2.21.0 @Nuru (#712) + +### what + +- Set `helm_manifest_experiment_enabled` to `false` by default +- Block Kubernetes provider 2.21.0 + +### why + +- The `helm_manifest_experiment_enabled` reliably breaks when a Helm chart installs CRDs. The initial reason for + enabling it was for better drift detection, but the provider seems to have fixed most if not all of the drift + detection issues since then. +- Kubernetes provider 2.21.0 had breaking changes which were reverted in 2.21.1. + +### references + +- https://github.com/hashicorp/terraform-provider-kubernetes/pull/2084#issuecomment-1576711378 + +
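+If a particular chart still benefits from the experiment's drift detection, it can be re-enabled per component; a
+minimal sketch, assuming the flag is exposed as a component variable of the same name (the component shown is just an
+example):
+
+```yaml
+components:
+  terraform:
+    eks/echo-server:
+      vars:
+        helm_manifest_experiment_enabled: true  # opt back in; the default is now false
+```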
+ +## 1.219.0 (2023-06-05T20:23:17Z) + +
+ Expand ECR GH OIDC Default Policy @milldr (#711) + +### what + +- updated default ECR GH OIDC policy + +### why + +- This policy should grant GH OIDC access to both public and private ECR repos + +### references + +- https://cloudposse.slack.com/archives/CA4TC65HS/p1685993698149499?thread_ts=1685990234.560589&cid=CA4TC65HS + +
+ +## 1.218.0 (2023-06-05T01:59:49Z) + +
+ Move `profiles_enabled` logic out of `providers.tf` and into `iam-roles` @Nuru (#702) + +### what + +- For Terraform roles and profiles used in `providers.tf`, return `null` for unused option +- Rename variables to `overridable_*` and update documentation to recommend `variables_override.tf` for customization + +### why + +- Prepare for `providers.tf` updates to support dynamic Terraform roles +- ARB decision on customization compatible with vendoring + +
+ +## 1.217.0 (2023-06-04T23:11:44Z) + +
+ [eks/external-secrets-operator] Normalize variables, update dependencies @Nuru (#708) + +### what + +For `eks/external-secrets-operator`: + +- Normalize variables, update dependencies +- Exclude Kubernetes provider v2.21.0 + +### why + +- Bring in line with other Helm-based modules +- Take advantage of improvements in dependencies + +### references + +- [Breaking change in Kubernetes provider v2.21.0](https://github.com/hashicorp/terraform-provider-kubernetes/pull/2084) + +
+ +## 1.216.2 (2023-06-04T23:08:39Z) + +### πŸš€ Enhancements + +
+ Update modules for Terraform AWS provider v5 @Nuru (#707) + +### what + +- Update modules for Terraform AWS provider v5 + +### why + +- Provider version 5.0.0 was released with breaking changes. This fixes the breakage. + +### references + +- [v5 upgrade guide](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/guides/version-5-upgrade) +- [v5.0.0 Release Notes](https://github.com/hashicorp/terraform-provider-aws/releases/tag/v5.0.0) + +
+ +## 1.216.1 (2023-06-04T01:18:31Z) + +### πŸš€ Enhancements + +
+ Preserve custom roles when vendoring in updates @Nuru (#697) + +### what + +- Add `additional-policy-map.tf` as glue meant to be replaced by customers with a map of their custom policies. + +### why + +- Currently, custom policies have to be manually added to the map in `main.tf`, but that gets overwritten with every + vendor update. Putting that map in a separate, optional file allows for the custom code to survive vendoring. + +
+ +## 1.216.0 (2023-06-02T18:02:01Z) + +
+ ssm-parameters: support tiers @johncblandii (#705) + +### what + +- Added support for ssm param tiers +- Updated the minimum version to `>= 1.3.0` to support `optional` parameters + +### why + +- `Standard` tier only supports 4096 characters. This allows Advanced and Intelligent Tiering support. + +### references + +
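+A hedged sketch of selecting a tier for a long value; the map shape of the `ssm-parameters` input is an assumption,
+but the tier names (`Standard`, `Advanced`, `Intelligent-Tiering`) are the real SSM options:
+
+```yaml
+components:
+  terraform:
+    ssm-parameters:
+      vars:
+        params:                       # hypothetical variable name and shape
+          /app/config/large-blob:
+            type: String
+            tier: Advanced            # Standard caps values at 4096 characters
+            value: "<a value longer than 4096 characters>"
+```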
+ +## 1.215.0 (2023-06-02T14:28:29Z) + +
+ `.editorconfig` Typo @milldr (#704) + +### what + +fixed intent typo + +### why + +should be spelled "indent" + +### references + +https://cloudposse.slack.com/archives/C01EY65H1PA/p1685638634845009 + +
+ +## 1.214.0 (2023-05-31T17:46:35Z) + +
+ Transit Gateway `var.connections` Redesign @milldr (#685) + +### what + +- Updated how the connection variables for `tgw/hub` and `tgw/spoke` are defined +- Moved the old versions of `tgw` to `deprecated/tgw` + +### why + +- We want to be able to define multiple or alternately named `vpc` or `eks/cluster` components for both hub and spoke +- The cross-region components are not updated yet with this new design, since the current customers requesting these + updates do not need cross-region access at this time. But we still want to support the old design so that customers using + cross-region components can access the old components. We will need to update the cross-region components in a follow-up + effort + +### references + +- https://github.com/cloudposse/knowledge-base/discussions/112 + +
+ +## 1.213.0 (2023-05-31T14:50:16Z) + +
+ Introducing Security Hub @zdmytriv (#683) + +### what + +- Introducing Security Hub component + +### why + +Amazon Security Hub enables users to centrally manage and monitor the security and compliance of their AWS accounts and +resources. It aggregates, organizes, and prioritizes security findings from various AWS services, third-party tools, and +integrated partner solutions. + +Here are the key features and capabilities of Amazon Security Hub: + +- Centralized security management: Security Hub provides a centralized dashboard where users can view and manage + security findings from multiple AWS accounts and regions. This allows for a unified view of the security posture + across the entire AWS environment. + +- Automated security checks: Security Hub automatically performs continuous security checks on AWS resources, + configurations, and security best practices. It leverages industry standards and compliance frameworks, such as AWS + CIS Foundations Benchmark, to identify potential security issues. + +- Integrated partner solutions: Security Hub integrates with a wide range of AWS native services, as well as third-party + security products and solutions. This integration enables the ingestion and analysis of security findings from diverse + sources, offering a comprehensive security view. + +- Security standards and compliance: Security Hub provides compliance checks against industry standards and regulatory + frameworks, such as PCI DSS, HIPAA, and GDPR. It identifies non-compliant resources and provides guidance on + remediation actions to ensure adherence to security best practices. + +- Prioritized security findings: Security Hub analyzes and prioritizes security findings based on severity, enabling + users to focus on the most critical issues. It assigns severity levels and generates a consolidated view of security + alerts, allowing for efficient threat response and remediation. + +- Custom insights and event aggregation: Security Hub supports custom insights, allowing users to create their own rules + and filters to focus on specific security criteria or requirements. It also provides event aggregation and correlation + capabilities to identify related security findings and potential attack patterns. + +- Integration with other AWS services: Security Hub seamlessly integrates with other AWS services, such as AWS + CloudTrail, Amazon GuardDuty, AWS Config, and AWS IAM Access Analyzer. This integration allows for enhanced + visibility, automated remediation, and streamlined security operations. + +- Alert notifications and automation: Security Hub supports alert notifications through Amazon SNS, enabling users to + receive real-time notifications of security findings. It also facilitates automation and response through integration + with AWS Lambda, allowing for automated remediation actions. + +By utilizing Amazon Security Hub, organizations can improve their security posture, gain insights into security risks, +and effectively manage security compliance across their AWS accounts and resources. + +### references + +- https://aws.amazon.com/security-hub/ +- https://github.com/cloudposse/terraform-aws-security-hub/ + +
+ +## 1.212.0 (2023-05-31T14:45:30Z) + +
+ Introducing GuardDuty @zdmytriv (#682) + +### what + +- Introducing GuardDuty component + +### why + +AWS GuardDuty is a managed threat detection service. It is designed to help protect AWS accounts and workloads by +continuously monitoring for malicious activities and unauthorized behaviors. GuardDuty analyzes various data sources +within your AWS environment, such as AWS CloudTrail logs, VPC Flow Logs, and DNS logs, to detect potential security +threats. + +Key features and components of AWS GuardDuty include: + +- Threat detection: GuardDuty employs machine learning algorithms, anomaly detection, and integrated threat intelligence + to identify suspicious activities, unauthorized access attempts, and potential security threats. It analyzes event + logs and network traffic data to detect patterns, anomalies, and known attack techniques. + +- Threat intelligence: GuardDuty leverages threat intelligence feeds from AWS, trusted partners, and the global + community to enhance its detection capabilities. It uses this intelligence to identify known malicious IP addresses, + domains, and other indicators of compromise. + +- Real-time alerts: When GuardDuty identifies a potential security issue, it generates real-time alerts that can be + delivered through AWS CloudWatch Events. These alerts can be integrated with other AWS services like Amazon SNS or AWS + Lambda for immediate action or custom response workflows. + +- Multi-account support: GuardDuty can be enabled across multiple AWS accounts, allowing centralized management and + monitoring of security across an entire organization's AWS infrastructure. This helps to maintain consistent security + policies and practices. + +- Automated remediation: GuardDuty integrates with other AWS services, such as AWS Macie, AWS Security Hub, and AWS + Systems Manager, to facilitate automated threat response and remediation actions. This helps to minimize the impact of + security incidents and reduces the need for manual intervention. + +- Security findings and reports: GuardDuty provides detailed security findings and reports that include information + about detected threats, affected AWS resources, and recommended remediation actions. These findings can be accessed + through the AWS Management Console or retrieved via APIs for further analysis and reporting. + +GuardDuty offers a scalable and flexible approach to threat detection within AWS environments, providing organizations +with an additional layer of security to proactively identify and respond to potential security risks. + +### references + +- https://aws.amazon.com/guardduty/ +- https://github.com/cloudposse/terraform-aws-guardduty + +
+ +## 1.211.0 (2023-05-30T16:30:47Z) + +
+ Upstream `aws-inspector` @milldr (#700) + +### what + +Upstream `aws-inspector` from a past engagement + +### why + +- This component was never upstreamed and now we want to use it again +- AWS Inspector is a security assessment service offered by Amazon Web Services (AWS). It helps you analyze and evaluate + the security and compliance of your applications and infrastructure deployed on AWS. AWS Inspector automatically + assesses the resources within your AWS environment, such as Amazon EC2 instances, for potential security + vulnerabilities and deviations from security best practices. Here are some key features and functionalities of AWS + Inspector: + + - Security Assessments: AWS Inspector performs security assessments by analyzing the behavior of your resources and + identifying potential security vulnerabilities. It examines the network configuration, operating system settings, + and installed software to detect common security issues. + + - Vulnerability Detection: AWS Inspector uses a predefined set of rules to identify common vulnerabilities, + misconfigurations, and security exposures. It leverages industry-standard security best practices and continuously + updates its knowledge base to stay current with emerging threats. + + - Agent-Based Architecture: AWS Inspector utilizes an agent-based approach, where you install an Inspector agent on + your EC2 instances. The agent collects data about the system and its configuration, securely sends it to AWS + Inspector, and allows for more accurate and detailed assessments. + + - Security Findings: After performing an assessment, AWS Inspector generates detailed findings that highlight security + vulnerabilities, including their severity level, impact, and remediation steps. These findings can help you + prioritize and address security issues within your AWS environment. + + - Integration with AWS Services: AWS Inspector seamlessly integrates with other AWS services, such as AWS + CloudFormation, AWS Systems Manager, and AWS Security Hub. This allows you to automate security assessments, manage + findings, and centralize security information across your AWS infrastructure. + +### references + +DEV-942 + +
+ +## 1.210.1 (2023-05-27T18:52:11Z) + +### πŸš€ Enhancements + +
+ Fix tags @aknysh (#701) + +### what + +- Fix tags + +### why + +- Typo + +
+ +### πŸ› Bug Fixes + +
+ Fix tags @aknysh (#701) + +### what + +- Fix tags + +### why + +- Typo + +
+ +## 1.210.0 (2023-05-25T22:06:24Z) + +
+ EKS FAQ for Addons @milldr (#699) + +### what + +Added docs for EKS Cluster Addons + +### why + +FAQ, requested for documentation + +### references + +DEV-846 + +
+ +## 1.209.0 (2023-05-25T19:05:53Z) + +
+ Update ALB controller IAM policy @Nuru (#696) + +### what + +- Update `eks/alb-controller` controller IAM policy + +### why + +- Email from AWS: + > On June 1, 2023, we will be adding an additional layer of security to ELB β€˜Create*' API calls where API callers must + > have explicit access to add tags in their Identity and Access Management (IAM) policy. Currently, access to attach + > tags was implicitly granted with access to 'Create*' APIs. + +### references + +- [Updated IAM policy](https://github.com/kubernetes-sigs/aws-load-balancer-controller/pull/3068) + +
+ +## 1.208.0 (2023-05-24T11:12:15Z) + +
+ Managed rules for AWS Config @zdmytriv (#690) + +### what + +- Added option to specify Managed Rules for AWS Config in addition to Conformance Packs + +### why + +- Managed rules allow adding and tuning AWS predefined rules in addition to Conformance Packs + +### references + +- [About AWS Config Managed Rules](https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_use-managed-rules.html) +- [List of AWS Config Managed Rules](https://docs.aws.amazon.com/config/latest/developerguide/managed-rules-by-aws-config.html) + +
+ +## 1.207.0 (2023-05-22T18:40:06Z) + +
+ Corrections to `dms` components @milldr (#658) + +### what + +- Corrections to `dms` components + +### why + +- Outputs were incorrect +- Set password and username with SSM + +### references + +- n/a + +
+ +## 1.206.0 (2023-05-20T19:41:35Z) + +
+ Upgrade S3 Bucket module to support recent changes made by AWS team regarding ACL @zdmytriv (#688) + +### what + +- Upgraded S3 Bucket module version + +### why + +- Upgrade S3 Bucket module to support recent changes made by AWS team regarding ACL + +### references + +- https://github.com/cloudposse/terraform-aws-s3-bucket/pull/178 + +
+ +## 1.205.0 (2023-05-19T23:55:14Z) + +
+ feat: add lambda monitors to datadog-monitor @dudymas (#686) + +### what + +- add lambda error monitor +- add datadog lambda log forwarder config monitor + +### why + +- Observability + +
+ +## 1.204.1 (2023-05-19T19:54:05Z) + +### πŸš€ Enhancements + +
+ Update `module "datadog_configuration"` modules @aknysh (#684) + +### what + +- Update `module "datadog_configuration"` modules + +### why + +- The module does not accept the `region` variable +- The module must be always enabled to be able to read the Datadog API keys even if the component is disabled + +
+ +## 1.204.0 (2023-05-18T20:31:49Z) + +
+ `datadog-agent` bugfixes @Benbentwo (#681) + +### what + +- update datadog agent to latest +- remove variable in datadog configuration + +
+ +## 1.203.0 (2023-05-18T19:44:08Z) + +
+ Update `vpc` and `eks/cluster` components @aknysh (#677) + +### what + +- Update `vpc` and `eks/cluster` components + +### why + +- Use latest module versions + +- Take into account `var.availability_zones` for the EKS cluster itself. Only the `node-group` module was using + `var.availability_zones` to use the subnets from the provided AZs. The EKS cluster (control plane) was using all the + subnets provisioned in a VPC. This caused issues because EKS is not available in all AZs in a region, e.g. it's not + available in `us-east-1e` b/c of a limited capacity, and when using all AZs from `us-east-1`, the deployment fails + +- The latest version of the `vpc` component (which was updated in this PR as well) has the outputs to get a map of AZs + to the subnet IDs in each AZ + +``` + # Get only the public subnets that correspond to the AZs provided in `var.availability_zones` + # `az_public_subnets_map` is a map of AZ names to list of public subnet IDs in the AZs + public_subnet_ids = flatten([for k, v in local.vpc_outputs.az_public_subnets_map : v if contains(var.availability_zones, k)]) + + # Get only the private subnets that correspond to the AZs provided in `var.availability_zones` + # `az_private_subnets_map` is a map of AZ names to list of private subnet IDs in the AZs + private_subnet_ids = flatten([for k, v in local.vpc_outputs.az_private_subnets_map : v if contains(var.availability_zones, k)]) +``` + +
+ +## 1.202.0 (2023-05-18T16:15:12Z) + +
+ feat: adds ability to list principals of Lambdas allowed to access ECR @gberenice (#680) + +### what + +- This change allows listing IDs of the accounts allowed to consume ECR. + +### why + +- This is supported by [terraform-aws-ecr](https://github.com/cloudposse/terraform-aws-ecr/tree/main), but not the + component. + +### references + +- N/A + +
+ +## 1.201.0 (2023-05-18T15:08:54Z) + +
+ Introducing AWS Config component @zdmytriv (#675) + +### what + +- Added AWS Config and related `config-bucket` components + +### why + +- Added AWS Config and related `config-bucket` components + +### references + +
+ +## 1.200.1 (2023-05-18T14:52:10Z) + +### πŸš€ Enhancements + +
+ Fix `datadog` components @aknysh (#679) + +### what + +- Fix all `datadog` components + +### why + +- Variable `region` is not supported by the `datadog-configuration/modules/datadog_keys` submodule + +
+ +## 1.200.0 (2023-05-17T09:19:40Z) + +- No changes + +## 1.199.0 (2023-05-16T15:01:56Z) + +
+ `eks/alb-controller-ingress-group`: Corrected Tags to pull LB Data Resource @milldr (#676) + +### what + +- corrected the tag reference used to pull the LB data resource + +### why + +- The tags used to look up the created ALB should filter on the same `group_name` that is given when + the LB is created + +### references + +- n/a + +
+ +## 1.198.3 (2023-05-15T20:01:18Z) + +### πŸ› Bug Fixes + +
+ Correct `cloudtrail` Account-Map Reference @milldr (#673) + +### what + +- Correctly pull Audit account from `account-map` for `cloudtrail` +- Remove `SessionName` from EKS RBAC user name wrongly added in #668 + +### why + +- account-map remote state was missing from the `cloudtrail` component +- Account names should be pulled from account-map, not using a variable +- Session Name automatically logged in `user.extra.sessionName.0` starting at Kubernetes 1.20, plus addition had a typo + and was only on Teams, not Team Roles + +### references + +- Resolves change requests https://github.com/cloudposse/terraform-aws-components/pull/638#discussion_r1193297727 and + https://github.com/cloudposse/terraform-aws-components/pull/638#discussion_r1193298107 +- Closes #672 +- [Internal Slack thread](https://cloudposse.slack.com/archives/CA4TC65HS/p1684122388801769) + +
+ +## 1.198.2 (2023-05-15T19:47:39Z) + +### πŸš€ Enhancements + +
+ bump config yaml dependency on account component as it still depends on hashicorp template provider @lantier (#671) + +### what + +- Bump [cloudposse/config/yaml](https://github.com/cloudposse/terraform-yaml-config) module dependency from version + 1.0.1 to 1.0.2 + +### why + +- 1.0.1 still uses the hashicorp/template provider, which has no M1 binary equivalent; 1.0.2 uses the cloudposse + version, which has the binary + +### references + +- https://github.com/cloudposse/terraform-yaml-config/releases/tag/1.0.2 + +
+ +## 1.198.1 (2023-05-15T18:55:09Z) + +### πŸ› Bug Fixes + +
+ Fixed `route53-resolver-dns-firewall` for the case when logging is disabled @zdmytriv (#669) + +### what + +- Fixed `route53-resolver-dns-firewall` for the case when logging is disabled + +### why + +- The component still required a bucket when logging was disabled + +### references + +
+ +## 1.198.0 (2023-05-15T17:37:47Z) + +
+ Add `aws-shield` component @aknysh (#670) + +### what + +- Add `aws-shield` component + +### why + +- The component is responsible for enabling AWS Shield Advanced Protection for the following resources: + + - Application Load Balancers (ALBs) + - CloudFront Distributions + - Elastic IPs + - Route53 Hosted Zones + +This component also requires that the account where the component is being provisioned has been +[subscribed to AWS Shield Advanced](https://docs.aws.amazon.com/waf/latest/developerguide/enable-ddos-prem.html). + +
+ +## 1.197.2 (2023-05-15T15:25:39Z) + +### πŸš€ Enhancements + +
+ EKS terraform module variable type fix @PiotrPalkaSpotOn (#674) + +### what + +- use `bool` rather than `string` type for a variable that's designed to hold `true`/`false` value + +### why + +- using `string` makes the + [if .Values.pvc_enabled](https://github.com/SpotOnInc/cloudposse-actions-runner-controller-tf-module-bugfix/blob/f224c7a4ee8b2ab4baf6929710d6668bd8fc5e8c/modules/eks/actions-runner-controller/charts/actions-runner/templates/runnerdeployment.yaml#L1) + condition always true and creates persistent volumes even if they're not intended to use + +
+ +## 1.197.1 (2023-05-11T20:39:03Z) + +### πŸ› Bug Fixes + +
+ Remove (broken) root access to EKS clusters @Nuru (#668) + +### what + +- Remove (broken) root access to EKS clusters +- Include session name in audit trail of users accessing EKS + +### why + +- Test code granting access to all `root` users and roles was accidentally left in #645 and breaks when Tenants are part + of account names +- There is no reason to allow `root` users to access EKS clusters, so even when this code worked it was wrong +- Audit trail can keep track of who is performing actions + +### references + +- https://aws.github.io/aws-eks-best-practices/security/docs/iam/#use-iam-roles-when-multiple-users-need-identical-access-to-the-cluster + +
+ +## 1.197.0 (2023-05-11T17:59:40Z) + +
+ `rds` Component readme update @Benbentwo (#667) + +### what + +- Updating default example from mssql to postgres + +
+ +## 1.196.0 (2023-05-11T17:56:41Z) + +
+ Update `vpc-flow-logs` @milldr (#649) + +### what + +- Modernized `vpc-flow-logs` with latest conventions + +### why + +- Old version of the component was significantly out of date +- #498 + +### references + +- DEV-880 + +
+ +## 1.195.0 (2023-05-11T07:27:29Z) + +
+ Add `iam-policy` to `ecs-service` @milldr (#663) + +### what + +Add an option to attach the `iam-policy` resource to `ecs-service` + +### why + +This policy is already created, but is missing its attachment. We should attach this to the resource when enabled + +### references + +https://cloudposse.slack.com/archives/CA4TC65HS/p1683729972134479 + +
+ +## 1.194.0 (2023-05-10T18:36:37Z) + +
+ upstream `acm` and `datadog-integration` @Benbentwo (#666) + +### what + +- ACM allows disabling `*.my.domain` +- Datadog-Integration supports allow-listing regions + +
+ +## 1.193.0 (2023-05-09T16:00:08Z) + +
+ Add `route53-resolver-dns-firewall` and `network-firewall` components @aknysh (#651) + +### what + +- Add `route53-resolver-dns-firewall` component +- Add `network-firewall` component + +### why + +- The `route53-resolver-dns-firewall` component is responsible for provisioning + [Route 53 Resolver DNS Firewall](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resolver-dns-firewall.html) + resources, including Route 53 Resolver DNS Firewall, domain lists, firewall rule groups, firewall rules, and logging + configuration + +- The `network-firewall` component is responsible for provisioning + [AWS Network Firewall](https://aws.amazon.com/network-firewal) resources, including Network Firewall, firewall policy, + rule groups, and logging configuration + +
+ +## 1.192.0 (2023-05-09T15:40:43Z) + +
+ [ecs-service] Added IAM policies for ecspresso deployments @goruha (#659) + +### what + +- [ecs-service] Added IAM policies for [Ecspresso](https://github.com/kayac/ecspresso) deployments + +
+ +## 1.191.0 (2023-05-05T22:16:44Z) + +
+ `elasticsearch` Corrections @milldr (#662) + +### what + +- Modernize Elasticsearch component + +### why + +- `elasticsearch` was not deployable as is. Added up-to-date config + +### references + +- n/a + +
+ +## 1.190.0 (2023-05-05T18:46:26Z) + +
+ fix: remove stray component.yaml in lambda @dudymas (#661) + +### what + +- Remove the `component.yaml` in the lambda component + +### why + +- Vendoring would potentially cause conflicts + +
+ +## 1.189.0 (2023-05-05T18:22:04Z) + +
+ fix: eks/efs-controller iam policy updates @dudymas (#660) + +### what + +- Update the iam policy for eks/efs-controller + +### why + +- Older permissions will not work with new versions of the controller + +### references + +- [official iam policy sample](https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/docs/iam-policy-example.json) + +
+ +## 1.188.0 (2023-05-05T17:05:23Z) + +
+ Move `eks/efs` to `efs` @milldr (#653) + +### what + +- Moved `eks/efs` to `efs` + +### why + +- `efs` shouldn't be a submodule of `eks`. You can deploy EFS without EKS + +### references + +- n/a + +
+ +## 1.187.0 (2023-05-04T23:04:26Z) + +
+ ARC enhancement, aws-config bugfix, DNS documentation @Nuru (#655) + +### what + +- Fix bug in `aws-config` +- Enhance documentation to explain relationship of `dns-primary` and `dns-delegated` components and `dns` account +- [`eks/actions-runner-controller`] Add support for annotations and improve support for ephemeral storage + +### why + +- Bugfix +- Customer query, supersedes and closes #652 +- Better support for longer lived jobs + +### references + +- https://github.com/actions/actions-runner-controller/issues/2562 + +
+ +## 1.186.0 (2023-05-04T18:15:31Z) + +
+ Update `RDS` @Benbentwo (#657) + +### what + +- Update RDS Modules +- Allow disabling Monitoring Role + +### why + +- Monitoring not always needed +- Context.tf Updates in modules + +
+ +## 1.185.0 (2023-04-26T21:30:24Z) + +
+ Add `amplify` component @aknysh (#650) + +### what + +- Add `amplify` component + +### why + +- Terraform component to provision AWS Amplify apps, backend environments, branches, domain associations, and webhooks + +### references + +- https://aws.amazon.com/amplify + +
+ +## 1.184.0 (2023-04-25T14:29:29Z) + +
+ Upstream: `eks/ebs-controller` @milldr (#640) + +### what + +- Added component for `eks/ebs-controller` + +### why + +- Upstreaming this component for general use + +### references + +- n/a + +
+ +## 1.183.0 (2023-04-24T23:21:17Z) + +
+ GitHub OIDC FAQ @milldr (#648) + +### what + +Added common question for GHA + +### why + +This is asked frequently + +### references + +https://cloudposse.slack.com/archives/C04N39YPVAS/p1682355553255269 + +
+ +## 1.182.1 (2023-04-24T19:37:31Z) + +### πŸš€ Enhancements + +
+ [aws-config] Update usage info, add "help" and "teams" commands @Nuru (#647) + +### what + +Update `aws-config` command: + +- Add `teams` command and suggest "aws-config-teams" file name instead of "aws-config-saml" because we want to use + "aws-config-teams" for both SAML and SSO logins with Leapp handling the difference. +- Add `help` command +- Add more extensive help +- Do not rely on script generated by `account-map` for command `main()` function + +### why + +- Reflect latest design pattern +- Improved user experience + +
+ +## 1.182.0 (2023-04-21T17:20:14Z) + +
+ Athena CloudTrail Queries @milldr (#638) + +### what + +- added cloudtrail integration to athena +- conditionally allow audit account to decrypt kms key used for cloudtrail + +### why + +- allow queries against cloudtrail logs from a centralized account (audit) + +### references + +n/a + +
+ +## 1.181.0 (2023-04-20T22:00:24Z) + +
+ Format Identity Team Access Permission Set Name @milldr (#646) + +### what + +- format permission set roles with hyphens + +### why + +- pretty Permission Set naming. We want `devops-super` to format to `IdentityDevopsSuperTeamAccess` + +### references + +https://github.com/cloudposse/refarch-scaffold/pull/127 + +
+ +## 1.180.0 (2023-04-20T21:12:28Z) + +
+ Fix `s3-bucket` `var.bucket_name` @milldr (#637) +
+### what
+
+changed the default value for the bucket name to an empty string instead of `null`
+
+### why
+
+the default bucket name should be an empty string, not `null`, because the module checks the name length
+
+### references
+
+n/a
+
+
+ +## 1.179.0 (2023-04-20T20:26:20Z) + +
+ ecs-service: fix lint issues @kevcube (#636) + +
+ +## 1.178.0 (2023-04-20T20:23:10Z) + +
+ fix:aws-team-roles have stray locals @dudymas (#642) + +### what + +- remove locals from modules/aws-team-roles + +### why + +- breaks component when it tries to configure locals (the remote state for account_map isn't around) + +
+ +## 1.177.0 (2023-04-20T05:13:53Z) + +
+ Convert eks/cluster to aws-teams and aws-sso @Nuru (#645) + +### what + +- Convert `eks/cluster` to `aws-teams` +- Add `aws-sso` support to `eks/cluster` +- Undo automatic allowance of `identity` `aws-sso` permission sets into account roles added in #567 + +### why + +- Keep in sync with other modules +- #567 is a silent privilege escalation and not needed to accomplish desired goals + +
+ +## 1.176.1 (2023-04-19T14:20:27Z) + +### πŸš€ Enhancements + +
+ fix: Use `vpc` without tenant @MaxymVlasov (#644) + +### why + +```bash +β”‚ Error: Error in function call +β”‚ +β”‚ on remote-state.tf line 10, in module "vpc_flow_logs_bucket": +β”‚ 10: tenant = coalesce(var.vpc_flow_logs_bucket_tenant_name, module.this.tenant) +β”‚ β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ β”‚ while calling coalesce(vals...) +β”‚ β”‚ module.this.tenant is "" +β”‚ β”‚ var.vpc_flow_logs_bucket_tenant_name is null +β”‚ +β”‚ Call to function "coalesce" failed: no non-null, non-empty-string +β”‚ arguments. +``` + +
+ +## 1.176.0 (2023-04-18T18:46:38Z) + +
+ feat: cloudtrail-bucket can have acl configured @dudymas (#643) + +### what + +- add `acl` var to `cloudtrail-bucket` component + +### why + +- Creating new cloudtrail buckets will fail if the acl isn't set to private + +### references + +- This is part of + [a security update from AWS](https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-bucket-faq.html) + +
+ +## 1.175.0 (2023-04-11T12:11:46Z) + +
+ [argocd-repo] Added ArgoCD git commit notifications @goruha (#633) + +### what + +- [argocd-repo] Added ArgoCD git commit notifications + +### why + +- ArgoCD sync deployment + +
+ +## 1.174.0 (2023-04-11T08:53:06Z) + +
+ [argocd] Added github commit status notifications @goruha (#631) + +### what + +- [argocd] Added github commit status notifications + +### why + +- ArgoCD sync deployment fix concurrent issue + +
+ +## 1.173.0 (2023-04-06T19:21:23Z) + +
+ Missing Version Pins for Bats @milldr (#629) + +### what + +added missing provider version pins + +### why + +missing provider versions, required for bats + +### references + +#626 #628, #627 + +
+ +## 1.172.0 (2023-04-06T18:32:04Z) + +
+ update datadog_lambda_forwarder ref for darwin_arm64 @kevcube (#626) + +### what + +- update datadog-lambda-forwarder module for darwin_arm64 + +### why + +- run on Darwin_arm64 hardware + +
+ +## 1.171.0 (2023-04-06T18:11:40Z) + +
+ Version Pinning Requirements @milldr (#628) + +### what + +- missing bats requirements resolved + +### why + +- PR #627 missed a few bats requirements in submodules + +### references + +- #627 +- #626 + +
+ +## 1.170.0 (2023-04-06T17:38:24Z) + +
+ Bats Version Pinning @milldr (#627) + +### what + +- upgraded pattern for version pinning + +### why + +- bats would fail for all of these components unless these versions are pinned as such + +### references + +- https://github.com/cloudposse/terraform-aws-components/pull/626 + +
+ +## 1.169.0 (2023-04-05T20:28:39Z) + +
+ [eks/actions-runner-controller]: support Runner Group, webhook queue size @Nuru (#621) + +### what + +- `eks/actions-runner-controller` + - Support + [Runner Groups](https://docs.github.com/en/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups) + - Enable configuration of the webhook queue size limit + - Change runner controller Docker image designation +- Add documentation on Runner Groups and Autoscaler configuration + +### why + +- Enable separate access control to self-hosted runners +- For users that launch a large number of jobs in a short period of time, allow bigger queues to avoid losing jobs +- Maintainers recommend new tag format. `ghcr.io` has better rate limits than `docker.io`. + +### references + +- https://github.com/actions/actions-runner-controller/issues/2056 + +
+ +## 1.168.0 (2023-04-04T21:48:58Z) + +
+ s3-bucket: use cloudposse template provider for arm64 @kevcube (#618) +
+### what
+
+- use Cloud Posse's template provider
+
+### why
+
+- arm64 support
+- this provider was also not pinned in `versions.tf`, so that needed to be fixed as well
+
+### references
+
+- closes #617
+
+
+ +## 1.167.0 (2023-04-04T18:14:45Z) + +
+ chore: aws-sso modules updated to 1.0.0 @dudymas (#623) + +### what + +- upgrade aws-sso modules: permission_sets, sso_account_assignments, and sso_account_assignments_root + +### why + +- upstream updates + +
+ +## 1.166.0 (2023-04-03T13:39:53Z) + +
+ Add `datadog-synthetics` component @aknysh (#619) + +### what + +- Add `datadog-synthetics` component + +### why + +- This component is responsible for provisioning Datadog synthetic tests + +- Supports Datadog synthetics private locations + + - https://docs.datadoghq.com/getting_started/synthetics/private_location + - https://docs.datadoghq.com/synthetics/private_locations + +- Synthetic tests allow you to observe how your systems and applications are performing using simulated requests and + actions from the AWS managed locations around the globe and to monitor internal endpoints from private locations + +
+ +## 1.165.0 (2023-03-31T22:11:26Z) + +
+ Update `eks/cluster` README @milldr (#616) +
+### what
+
+- Updated the README for the `eks/cluster` component
+
+### why
+
+The example stack was outdated. Added notes for GitHub OIDC and Karpenter
+
+### references
+
+https://cloudposse.atlassian.net/browse/DEV-835
+
+
+ +## 1.164.1 (2023-03-30T20:03:15Z) + +### πŸš€ Enhancements + +
+ spacelift: Update README.md example login policy @johncblandii (#597) + +### what + +- Added support for allowing spaces read access to all members +- Added a reference for allowing spaces write access to the "Developers" group + +### why + +- Spacelift moved to Spaces Access Control + +### references + +- https://docs.spacelift.io/concepts/spaces/access-control + +
+ +## 1.164.0 (2023-03-30T16:25:28Z) + +
+ Update several component Readmes @Benbentwo (#611) + +### what + +- Update Readmes of many components from Refarch Docs + +
+ +## 1.163.0 (2023-03-29T19:52:46Z) + +
+ add providers to `mixins` folder @Benbentwo (#613) + +### what + +- Copies some common providers to the mixins folder + +### why + +- Have a central place where our common providers are held. + +
+ +## 1.162.0 (2023-03-29T19:30:15Z) + +
+ Added ArgoCD GitHub notification subscription @goruha (#615) + +### what + +- Added ArgoCD GitHub notification subscription + +### why + +- To use synchronous deployment pattern + +
+ +## 1.161.1 (2023-03-29T17:20:27Z) + +### πŸš€ Enhancements + +
+ waf component, update dependency versions for aws provider and waf terraform module @arcaven (#612) +
+### what
+
+- updates to waf module:
+  - aws provider from ~> 4.0 to >= 4.0
+  - module cloudposse/waf/aws from 0.0.4 to 0.2.0
+  - different recommended catalog entry
+
+### why
+
+- @aknysh suggested some updates before we start using waf module
+
+
+ +## 1.161.0 (2023-03-28T19:51:27Z) + +
+ Quick fixes to EKS/ARC arm64 Support @Nuru (#610) +
+### what
+
+- While supporting EKS/ARC `arm64`, continue to deploy `amd64` by default
+- Make `tolerations.value` optional
+
+### why
+
+- The majority of ecosystem support is currently `amd64`
+- `tolerations.value` is optional in the Kubernetes spec
+
+### references
+
+- Corrects issue which escaped review in #609
+
+
+ +## 1.160.0 (2023-03-28T18:26:20Z) + +
+ Upstream EKS/ARC amd64 Support @milldr (#609) + +### what + +Added arm64 support for eks/arc + +### why + +when supporting both amd64 and arm64, we need to select the correct architecture + +### references + +https://github.com/cloudposse/infra-live/pull/265 + +
+ +## 1.159.0 (2023-03-27T16:19:29Z) + +
+ Update account-map to output account information for aws-config script @Nuru (#608) + +### what + +- Update `account-map` to output account information for `aws-config` script +- Output AWS profile name for root of credential chain + +### why + +- Enable `aws-config` to output account IDs and to generate configuration for "AWS Extend Switch Roles" browser plugin +- Support multiple namespaces in a single infrastructure repo + +
+ +
+ Update CODEOWNERS to remove contributors @Nuru (#607) + +### what + +- Update CODEOWNERS to remove contributors + +### why + +- Require approval from engineering team (or in some cases admins) for all changes, to keep better quality control on + this repo + +
+ +## 1.158.0 (2023-03-27T03:41:43Z) + +
+ Upstream latest datadog-agent and datadog-configuration updates @nitrocode (#598) + +### what + +- Upstream latest datadog-agent and datadog-configuration updates + +### why + +- datadog irsa role +- removing unused input vars +- default to `public.ecr.aws` images +- ignore deprecated `default.auto.tfvars` +- move `datadog-agent` to `eks/` subfolder for consistency with other helm charts + +### references + +N/A + +
+ +## 1.157.0 (2023-03-24T19:12:17Z) + +
+ Remove `root_account_tenant_name` @milldr (#605) + +### what + +- bumped ecr +- remove unnecessary variable + +### why + +- ECR version update +- We shouldn't need to set `root_account_tenant_name` in providers +- Some Terraform docs are out-of-date + +### references + +- n/a + +
+ +## 1.156.0 (2023-03-23T21:03:46Z) + +
+ exposing variables from 2.0.0 of `VPC` module @Benbentwo (#604) + +### what + +- Adding vars for vpc module and sending them directly to module + +### references + +- https://github.com/cloudposse/terraform-aws-vpc/blob/master/variables.tf#L10-L44 + +
+ +## 1.155.0 (2023-03-23T02:01:29Z) + +
+ Add Privileged Option for GH OIDC @milldr (#603) + +### what + +- allow gh oidc role to use privileged as option for reading tf backend + +### why + +- If deploying GH OIDC with a component that needs to be applied with SuperAdmin (aws-teams) we need to set privileged + here + +### references + +- https://cloudposse.slack.com/archives/C04N39YPVAS/p1679409325357119 + +
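+As a sketch only (the component name below is an assumption for illustration; `privileged` is the option this change
+adds), the setting would be toggled from a stack manifest roughly like this:
+
+```yaml
+components:
+  terraform:
+    github-oidc-provider: # hypothetical component name
+      vars:
+        privileged: true # read the Terraform state backend with SuperAdmin-level access
+```
+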
+ +## 1.154.0 (2023-03-22T17:40:35Z) + +
+ update `opsgenie-team` to be delete-able via `enabled: false` @Benbentwo (#589) +
+### what
+
+- Uses the Datadog Configuration component as its source of Datadog variables
+- Now supports `enabled: false` on a team to destroy it.
+
+
+ +## 1.153.0 (2023-03-21T19:22:03Z) + +
+ Upstream AWS Teams components @milldr (#600) + +### what + +- added eks view only policy + +### why + +- Provided updates from recent contracts + +### references + +- https://github.com/cloudposse/refarch-scaffold/pull/99 + +
+ +## 1.152.0 (2023-03-21T15:42:51Z) + +
+ upstream 'datadog-lambda-forwarder' @gberenice (#601) + +### what + +- Upgrade 'datadog-lambda-forwarder' component to v1.3.0 + +### why + +- Be able [to forward Cloudwatch Events](https://github.com/cloudposse/terraform-aws-datadog-lambda-forwarder/pull/48) + via components. + +### references + +- N/A + +
+ +## 1.151.0 (2023-03-15T15:56:20Z) + +
+ Upstream `eks/external-secrets-operator` @milldr (#595) + +### what + +- Adding new module for `eks/external-secrets-operator` + +### why + +- Other customers want to use this module now, and it needs to be upstreamed + +### references + +- n/a + +
+ +## 1.150.0 (2023-03-14T20:20:41Z) + +
+ chore(spacelift): update with dependency resource @dudymas (#594) + +### what + +- update spacelift component to 0.55.0 + +### why + +- support feature flag for spacelift_stack_dependency resource + +### references + +- [spacelift module 0.55.0](https://github.com/cloudposse/terraform-spacelift-cloud-infrastructure-automation/releases/tag/0.55.0) + +
+ +## 1.149.0 (2023-03-13T15:25:25Z) + +
+ Fix SSO SAML provider fixes @goruha (#592) +
+### what
+
+- Fixes for the SSO SAML provider
+
+
+ +## 1.148.0 (2023-03-10T18:07:36Z) + +
+ ArgoCD SSO improvements @goruha (#590) + +### what + +- ArgoCD SSO improvements + +
+ +## 1.147.0 (2023-03-10T17:52:18Z) + +
+ Upstream: `eks/echo-server` @milldr (#591) + +### what + +- Adding the `ingress.alb.group_name` annotation to Echo Server + +### why + +- Required to set the ALB specifically, rather than using the default + +### references + +- n/a + +
+ +## 1.146.0 (2023-03-08T23:13:13Z) + +
+ Improve platform and external-dns for release engineering @goruha (#588) + +### what + +- `eks/external-dns` support `dns-primary` +- `eks/platform` support json query remote components outputs + +### why + +- `vanity domain` pattern support by `eks/external-dns` +- Improve flexibility of `eks/platform` + +
+ +## 1.145.0 (2023-03-07T00:28:25Z) + +
+ `eks/actions-runner-controller`: use coalesce @Benbentwo (#586) + +### what + +- use coalesce instead of try, as we need a value passed in here + +
+ +## 1.144.0 (2023-03-05T20:24:09Z) + +
+ Upgrade Remote State to `1.4.1` @milldr (#585) + +### what + +- Upgrade _all_ remote state modules (`cloudposse/stack-config/yaml//modules/remote-state`) to version `1.4.1` + +### why + +- In order to use go templating with Atmos, we need to use the latest cloudposse/utils version. This version is + specified by `1.4.1` + +### references + +- https://github.com/cloudposse/terraform-yaml-stack-config/releases/tag/1.4.1 + +
+ +## 1.143.0 (2023-03-02T18:07:53Z) + +
+ bugfix: rds anomalies monitor not sending team information @Benbentwo (#583) + +### what + +- Update monitor to have default CP tags + +
+ +## 1.142.0 (2023-03-02T17:49:40Z) + +
+ datadog-lambda-forwarder: if s3_buckets not set, module fails @kevcube (#581) +
+This module attempts to call `length()` on the value of `s3_buckets`.
+
+We are not using `s3_buckets`, so it keeps its default of `null`, and `length(null)` fails.
+
+
+ +## 1.141.0 (2023-03-01T19:10:07Z) + +
+ `datadog-monitors`: Team Grouping @Benbentwo (#580) + +### what + +- grouping by team helps ensure the team tag is sent to Opsgenie + +### why + +- ensures most data is fed to a valid team tag instead of `@opsgenie-` + +
+ +## 1.140.0 (2023-02-28T18:47:44Z) + +
+ `spacelift` add missing `var.region` @johncblandii (#574) + +### what + +- Added the missing `var.region` + +### why + +- The AWS provider requires it and it was not available + +### references + +
+ +## 1.139.0 (2023-02-28T18:46:35Z) + +
+ datadog monitors improvements @Benbentwo (#579) + +### what + +- Datadog monitor improvements + - Prepends `()` e.g. `(tenant-environment-stage)` + - Fixes some messages that had improper syntax - dd uses `{{ var.name }}` + +### why + +- Datadog monitor improvements + +
+ +## 1.138.0 (2023-02-28T18:45:48Z) + +
+ update `account` readme.md @Benbentwo (#570) + +### what + +- Updated account readme + +
+ +## 1.137.0 (2023-02-27T20:39:34Z) + +
+ Update `eks/cluster` @Benbentwo (#578) + +### what + +- Update EKS Cluster Module to re-include addons + +
+ +## 1.136.0 (2023-02-27T17:36:47Z) + +
+ Set spacelift-worker-pool ami explicitly to x86_64 @arcaven (#577) +
+### why
+
+- autoscaling group for spacelift-worker-pool will fail to launch when new arm64 images return first
+- the arm64 AMI is currently being returned first in us-east-1
+
+### what
+
+- set spacelift-worker-pool ami statically to return only x86_64 results
+
+### references
+
+- Spacelift Worker Pool ASG may fail to scale due to ami/instance type mismatch #575
+- Note: this is an alternative to spacelift-worker-pool README update and AMI limits #573 which I read after, but I
+  think this filter approach will be more easily refactored into setting this as an attribute in variables.tf in the
+  near future
+
+
+ +## 1.135.0 (2023-02-27T13:56:48Z) + +
+ github-runners add support for runner groups @johncblandii (#569) + +### what + +- Added optional support for separating runners by groups + +NOTE: I don't know if the default of `default` is valid or if it is `Default`. I'll confirm this soon. + +### why + +- Groups are supported by GitHub and allow for Actions to target specific runners by group vs by label + +### references + +- https://docs.github.com/en/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups + +
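+A minimal sketch of the idea (the variable name below is hypothetical; check the component's inputs for the actual
+name, and the group must already exist in the GitHub organization):
+
+```yaml
+components:
+  terraform:
+    github-runners:
+      vars:
+        runner_group: default # hypothetical variable; lets workflows target these runners by group instead of label
+```
+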
+ +## 1.134.0 (2023-02-24T20:59:40Z) + +
+ [account-map] Update remote config module version @goruha (#572) +
+### what
+
+- Update remote config module version to `1.4.1`
+
+### why
+
+- Solve a Terraform module version conflict
+
+
+ +## 1.133.0 (2023-02-24T17:55:52Z) + +
+ Fix ArgoCD minor issues @goruha (#571) + +### what + +- Fix slack notification annotations +- Fix CRD creation order + +### why + +- Fix ArgoCD bootstrap + +
+ +## 1.132.0 (2023-02-23T04:33:29Z) + +
+ Add spacelift-policy component @nitrocode (#556) + +### what + +- Add spacelift-policy component + +### why + +- De-couple policy creation from admin and child stacks +- Auto attach policies to remove additional terraform management of resources + +### references + +- Depends on PR https://github.com/cloudposse/terraform-spacelift-cloud-infrastructure-automation/pull/134 + +
+ +## 1.131.0 (2023-02-23T01:13:58Z) + +
+ SSO upgrades and Support for Assume Role from Identity Users @johncblandii (#567) + +### what + +- Upgraded `aws-sso` to use `0.7.1` modules +- Updated `account-map/modules/roles-to-principals` to support assume role from SSO users in the identity account +- Adjusted `aws-sso/policy-Identity-role-RoleAccess.tf` to use the identity account name vs the stage so it supports + names like `core-identity` instead of just `identity` + +### why + +- `aws-sso` users could not assume role to plan/apply terraform locally +- using `core-identity` as a name broke the `aws-sso` policy since account `identity` does not exist in + `full_account_map` + +### references + +
+ +## 1.130.0 (2023-02-21T18:33:53Z) + +
+ Add Redshift component @max-lobur (#563) + +### what + +- Add Redshift + +### why + +- Fulfilling the AWS catalog + +### references + +- https://github.com/cloudposse/terraform-aws-redshift-cluster + +
+ +## 1.129.0 (2023-02-21T16:45:43Z) + +
+ update dd agent docs @Benbentwo (#565) + +### what + +- Update Datadog Docs to be more clear on catalog entry + +
+ +## 1.128.0 (2023-02-18T16:28:11Z) + +
+ feat: updates spacelift to support policies outside of the comp folder @Gowiem (#522) + +### what + +- Adds back `policies_by_name_path` variable to spacelift component + +### why + +- Allows specifying spacelift policies outside of the component folder + +### references + +- N/A + +
+ +## 1.127.0 (2023-02-16T17:53:31Z) + +
+ [sso-saml-provider] Upstream SSO SAML provider component @goruha (#562) + +### what + +- [sso-saml-provider] Upstream SSO SAML provider component + +### why + +- Required for ArgoCD + +
+ +## 1.126.0 (2023-02-14T23:01:00Z) + +
+ upstream `opsgenie-team` @Benbentwo (#561) + +### what + +- Upstreams latest opsgenie-team component + +
+ +## 1.125.0 (2023-02-14T21:45:32Z) + +
+ [eks/argocd] Upstream ArgoCD @goruha (#560) + +### what + +- Upstream `eks/argocd` + +
+ +## 1.124.0 (2023-02-14T17:34:29Z) + +
+ `aws-backup` upstream @Benbentwo (#559) + +### what + +- Update `aws-backup` to latest + +
+ +## 1.123.0 (2023-02-13T22:42:56Z) + +
+ upstream lambda pt2 @Benbentwo (#558) + +### what + +- Add archive zip +- Change to python (no compile) + +
+ +## 1.122.0 (2023-02-13T21:24:02Z) + +
+ upstream `lambda` @Benbentwo (#557) + +### what + +- Upstream `lambda` component + +### why + +- Quickly deploy serverless code + +
+ +## 1.121.0 (2023-02-13T16:59:16Z) + +
+ Upstream `ACM` and `eks/Platform` for release_engineering @Benbentwo (#555) +
+### what
+
+- The ACM component now outputs its ACM URL
+- `eks/platform` will deploy many Terraform outputs to SSM
+
+### why
+
+- These components are required for CP Release Engineering Setup
+
+
+ +## 1.120.0 (2023-02-08T16:34:25Z) + +
+ Upstream datadog logs archive @Benbentwo (#552) + +### what + +- Upstream DD Logs Archive + +
+ +## 1.119.0 (2023-02-07T21:32:25Z) + +
+ Upstream `dynamodb` @milldr (#512) + +### what + +- Updated the `dynamodb` component + +### why + +- maintaining up-to-date upstream component + +### references + +- N/A + +
+ +## 1.118.0 (2023-02-07T20:15:17Z) + +
+ fix dd-forwarder: datadog service config depends on lambda arn config @raybotha (#531) + +
+ +## 1.117.0 (2023-02-07T19:44:32Z) + +
+ Upstream `spa-s3-cloudfront` @milldr (#500) + +### what + +- Added missing component from upstream `spa-s3-cloudfront` + +### why + +- We use this component to provision Cloudfront and related resources + +### references + +- N/A + +
+ +## 1.116.0 (2023-02-07T00:52:27Z) + +
+ Upstream `aurora-mysql` @milldr (#517) + +### what + +- Upstreaming both `aurora-mysql` and `aurora-mysql-resources` + +### why + +- Added option for allowing ingress by account name, rather than requiring CIDR blocks copy and pasted +- Replaced the deprecated provider for MySQL +- Resolved issues with Terraform perma-drift for the resources component with granting "ALL" + +### references + +- Old provider, archived: https://github.com/hashicorp/terraform-provider-mysql +- New provider: https://github.com/petoju/terraform-provider-mysql + +
+ +## 1.115.0 (2023-02-07T00:49:59Z) + +
+ Upstream `aurora-postgres` @milldr (#518) + +### what + +- Upstreaming `aurora-postgres` and `aurora-postgres-resources` + +### why + +- TLC for these components +- Added options for adding ingress by account +- Cleaned up the submodule for the resources component +- Support creating schemas +- Support conditionally pulling passwords from SSM, similar to `aurora-mysql` + +
+ +## 1.114.0 (2023-02-06T17:09:31Z) + +
+ `datadog-private-locations` update helm provider @Benbentwo (#549) + +### what + +- Updates Helm Provider to the latest + +### why + +- New API Version + +
+ +## 1.113.0 (2023-02-06T02:26:22Z) + +
+ Remove extra var from stack example @johncblandii (#550) + +### what + +- Stack example has an old variable defined + +### why + +- `The root module does not declare a variable named "eks_tags_enabled" but a value was found in file "uw2-automation-vpc.terraform.tfvars.json".` + +### references + +
+ +## 1.112.1 (2023-02-03T20:00:09Z) + +### πŸš€ Enhancements + +
+ Fixed non-html tags that fails rendering on docusaurus @zdmytriv (#546) + +### what + +- Fixed non-html tags + +### why + +- Rendering has been failing on docusaurus mdx/jsx engine + +
+ +## 1.112.0 (2023-02-03T19:02:57Z) + +
+ `datadog-agent` allow values var merged @Benbentwo (#548) + +### what + +- Allows values to be passed in and merged to values file + +### why + +- Need to be able to easily override values files + +
+ +## 1.111.0 (2023-01-31T23:02:57Z) + +
+ Update echo and alb-controller-ingress-group @Benbentwo (#547) + +### what + +- Allows target group to be targeted by echo server + +
+ +## 1.110.0 (2023-01-26T00:25:13Z) + +
+ Chore/acme/bootcamp core tenant @dudymas (#543) + +### what + +- upgrade the vpn module in the ec2-client-vpn component +- and protect outputs on ec2-client-vpn + +### why + +- saml docs were broken in refarch-scaffold. module was trying to alter the cert provider + +
+ +## 1.109.0 (2023-01-24T20:01:56Z) + +
+ Chore/acme/bootcamp spacelift @dudymas (#545) + +### what + +- adjust the type of context_filters in spacelift + +### why + +- was getting errors trying to apply spacelift component + +
+ +## 1.108.0 (2023-01-20T22:36:54Z) + +
+ EC2 Client VPN Version Bump @Benbentwo (#544) + +### what + +- Bump Version of EC2 Client VPN + +### why + +- Bugfixes issue with TLS provider + +### references + +- https://github.com/cloudposse/terraform-aws-ec2-client-vpn/pull/58 +- https://github.com/cloudposse/terraform-aws-ssm-tls-self-signed-cert/pull/20 + +
+ +## 1.107.0 (2023-01-19T17:34:33Z) + +
+ Update pod security context schema in cert-manager @max-lobur (#538) + +### what + +Pod security context `enabled` field has been deprecated. Now you just specify the options and that's it. Update the +options per recent schema. See references + +Tested on k8s 1.24 + +### why + +- Otherwise it does not pass Deployment validation on newer clusters. + +### references + +https://github.com/cert-manager/cert-manager/commit/c17b11fa01455eb1b83dce0c2c06be555e4d53eb + +
+ +## 1.106.0 (2023-01-18T15:36:52Z) + +
+ Fix github actions runner controller default variables @max-lobur (#542) + +### what + +Default value for string is null, not false + +### why + +- Otherwise this does not pass schema when you deploy it without storage requests + +
+ +## 1.105.0 (2023-01-18T15:24:11Z) + +
+ Update k8s metrics-server to latest @max-lobur (#537) + +### what + +Upgrade metrics-server Tested on k8s 1.24 via `kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes"` + +### why + +- The previous one was so old that bitnami has even removed the chart. + +
+ +## 1.104.0 (2023-01-18T14:52:58Z) + +
+ Pin kubernetes provider in metrics-server @max-lobur (#541) + +### what + +- Pin the k8s provider version +- Update versions + +### why + +- Fix CI + +### references + +- https://github.com/cloudposse/terraform-aws-components/pull/537 + +
+ +## 1.103.0 (2023-01-17T21:09:56Z) + +
+ fix(dns-primary/acm): include zone_name arg @dudymas (#540) + +### what + +- in dns-primary, revert version of acm module 0.17.0 -> 0.16.2 (17 is a preview) + +### why + +- primary zones must be specified now that names are trimmed before the dot (.) + +
+ +## 1.102.0 (2023-01-17T16:09:59Z) + +
+ Fix typo in karpenter-provisioner @max-lobur (#539) +
+### what
+
+I formatted it at the last moment and did not notice that it actually changed the object. Fixing that and reformatting
+all of it so it's more obvious for future maintainers.
+
+### why
+
+- Fixing bug
+
+### references
+
+https://github.com/cloudposse/terraform-aws-components/pull/536
+
+
+ +## 1.101.0 (2023-01-17T07:47:30Z) + +
+ Support setting consolidation in karpenter-provisioner @max-lobur (#536) + +### what + +This is an alternative way of deprovisioning - proactive one. + +``` +There is another way to configure Karpenter to deprovision nodes called Consolidation. +This mode is preferred for workloads such as microservices and is incompatible with setting +up the ttlSecondsAfterEmpty . When set in consolidation mode Karpenter works to actively +reduce cluster cost by identifying when nodes can be removed as their workloads will run +on other nodes in the cluster and when nodes can be replaced with cheaper variants due +to a change in the workloads +``` + +### why + +- To let users set a more aggressive deprovisioning strategy + +### references + +- https://ec2spotworkshops.com/karpenter/050_karpenter/consolidation.html + +
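+A sketch of what enabling consolidation could look like in a stack manifest (the variable shape below is an
+assumption, not the component's documented schema; per the quote above, consolidation must not be combined with
+`ttlSecondsAfterEmpty`):
+
+```yaml
+components:
+  terraform:
+    eks/karpenter-provisioner:
+      vars:
+        provisioners: # assumed variable name/shape
+          default:
+            consolidation:
+              enabled: true # proactively replace or remove underutilized nodes
+```
+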
+ +## 1.100.0 (2023-01-17T07:41:58Z) + +
+ Sync karpenter chart values with the schema @max-lobur (#535) + +### what + +Based on +https://github.com/aws/karpenter/blob/92b3d4a0b029cae6a9d6536517ba42d70c3ebf8c/charts/karpenter/values.yaml#L129-L142 +all these should go under settings.aws + +### why + +Ensure compatibility with the new charts + +### references + +Based on https://github.com/aws/karpenter/blob/92b3d4a0b029cae6a9d6536517ba42d70c3ebf8c/charts/karpenter/values.yaml + +
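+Illustrative only, based on the upstream `values.yaml` linked above: the AWS-specific chart values nest under
+`settings.aws`, e.g.
+
+```yaml
+settings:
+  aws:
+    clusterName: my-eks-cluster # illustrative values
+    defaultInstanceProfile: KarpenterNodeInstanceProfile
+```
+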
+ +## 1.99.0 (2023-01-13T14:59:16Z) + +
+ fix(aws-sso): dont hardcode account name for root @dudymas (#534) + +### what + +- remove hardcoding for root account moniker +- change default tenant from `gov` to `core` (now convention) + +### why + +- tenant is not included in the account prefix. In this case, changed to be 'core' +- most accounts do not use `gov` as the root tenant + +
+ +## 1.98.0 (2023-01-12T00:12:36Z) + +
+ Bump spacelift to latest @nitrocode (#532) + +### what + +- Bump spacelift to latest + +### why + +- Latest + +### references + +N/A + +
+ +## 1.97.0 (2023-01-11T01:16:33Z) + +
+ Upstream EKS Action Runner Controller @milldr (#528) + +### what + +- Upstreaming the latest additions for the EKS actions runner controller component + +### why + +- We've added additional features for the ARC runners, primarily adding options for ephemeral storage and persistent + storage. Persistent storage can be used to add image caching with EFS +- Allow for setting a `webhook_startup_timeout` value different than `scale_down_delay_seconds`. Defaults to + `scale_down_delay_seconds` + +### references + +- N/A + +
+ +## 1.96.0 (2023-01-05T21:19:22Z) + +
+ Datadog Upstreams and Account Settings @Benbentwo (#533) + +### what + +- Datadog Upgrades (Bugfixes for Configuration on default datadog URL) +- Account Settings Fixes for emoji support and updated budgets + +### why + +- Upstreams + +
+ +## 1.95.0 (2023-01-04T23:44:35Z) + +
+ fix(aws-sso): add missing tf update perms @dudymas (#530) +
+### what
+
+- Changes for supporting [Refarch Scaffold](https://github.com/cloudposse/refarch-scaffold)
+- TerraformUpdateAccess permission set added
+
+### why
+
+- Allow SSO users to update dynamodb/s3 for terraform backend
+
+
+ +## 1.94.0 (2022-12-21T18:38:15Z) + +
+ upstream `spacelift` @Benbentwo (#526) + +### what + +- Updated Spacelift Component to latest +- Updated README with new example + +### why + +- Upstreams + +
+ +## 1.93.0 (2022-12-21T18:37:37Z) + +
+ upstream `ecs` & `ecs-service` @Benbentwo (#529) + +### what + +- upstream + - `ecs` + - `ecs-service` + +### why + +- `enabled` flag correctly destroys resources +- bugfixes and improvements +- datadog support for ecs services + +
+ +## 1.92.0 (2022-12-21T18:36:35Z) + +
+ Upstream Datadog @Benbentwo (#525) + +### what + +- Datadog updates +- New `datadog-configuration` component for setting up share functions and making codebase more dry + +
+ +## 1.91.0 (2022-11-29T17:17:58Z) + +
+ CPLIVE-320: Set VPC to use region-less AZs @nitrocode (#524) + +### what + +- Set VPC to use region-less AZs + +### why + +- Prevent having to set VPC AZs within global region defaults + +### references + +- CPLIVE-320 + +
+ +## 1.90.2 (2022-11-20T05:41:14Z) + +### πŸš€ Enhancements + +
+ Use cloudposse/template for arm support @nitrocode (#510) + +### what + +- Use cloudposse/template for arm support + +### why + +- The new cloudposse/template provider has a darwin arm binary for M1 laptops + +### references + +- https://github.com/cloudposse/terraform-provider-template +- https://registry.terraform.io/providers/cloudposse/template/latest + +
+ +## 1.90.1 (2022-10-31T13:27:37Z) + +### πŸš€ Enhancements + +
+ Allow vpc-peering to peer v2 to v2 @nitrocode (#521) + +### what + +- Allow vpc-peering to peer v2 to v2 + +### why + +- Alternative to transit gateway + +### references + +N/A + +
+ +## 1.90.0 (2022-10-31T13:24:38Z) + +
+ Upstream iam-role component @nitrocode (#520) + +### what + +- Upstream iam-role component + +### why + +- Create simple IAM roles + +### references + +- https://github.com/cloudposse/terraform-aws-iam-role + +
+ +## 1.89.0 (2022-10-28T15:35:38Z) + +
+ [eks/actions-runner-controller] Auth via GitHub App, prefer webhook auto-scaling @Nuru (#519) + +### what + +- Support and prefer authentication via GitHub app +- Support and prefer webhook-based autoscaling + +### why + +- GitHub app is much more restricted, plus has higher API rate limits +- Webhook-based autoscaling is proactive without being overly expensive + +
+ +## 1.88.0 (2022-10-24T15:40:47Z) + +
+ Upstream iam-service-linked-roles @nitrocode (#516) + +### what + +- Upstream iam-service-linked-roles (thanks to @aknysh for writing it) + +### why + +- Centralized component to create IAM service linked roles + +### references + +- N/A + +
+ +## 1.87.0 (2022-10-22T19:12:36Z) + +
+ Add account-quotas component @Nuru (#515) + +### what + +- Add `account-quotas` component to manage account service quota increase requests + +### why + +- Add service quotas to the infrastructure that can be represented in code + +### notes + +Cloud Posse has a [service quotas module](https://github.com/cloudposse/terraform-aws-service-quotas), but it has +issues, such as not allowing the service to be specified by name, and not having well documented inputs. It also takes a +list input, but Atmos does not merge lists, so a map input is more appropriate. Overall I like this component better, +and if others do, too, I will replace the existing module (only at version 0.1.0) with this code. + +
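+To illustrate the map-keyed approach (the field names and codes below are assumptions for illustration, not the
+component's documented schema), a stack manifest entry might look like:
+
+```yaml
+components:
+  terraform:
+    account-quotas:
+      vars:
+        quotas: # keyed by a human-readable name so Atmos can deep-merge maps across stacks
+          vpcs-per-region:
+            service_code: vpc # illustrative
+            quota_code: L-F678F1CE # illustrative
+            value: 10
+```
+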
+ +## 1.86.0 (2022-10-19T07:28:11Z) + +
+ Update EKS basic components @Nuru (#509) + +### what && why + +Update EKS cluster and basic Kubernetes components for better behavior on initial deployment and on `terraform destroy`. + +- Update minimum Terraform version to 1.1.0 and use `one()` where applicable to manage resources that can be disabled + with `count = 0` and for bug fixes regarding destroy behavior +- Update `terraform-aws-eks-cluster` to v2.5.0 for better destroy behavior +- Update all components' (plus `account-map/modules/`)`remote-state` to v1.2.0 for better destroy behavior +- Update all components' `helm-release` to v0.7.0 and move namespace creation via Kubernetes provider into it to avoid + race conditions regarding creating IAM roles, Namespaces, and deployments, and to delete namespaces when destroyed +- Update `alb-controller` to deploy a default IngressClass for central, obvious configuration of shared default ingress + for services that do not have special needs. +- Add `alb-controller-ingress-class` for the rare case when we want to deploy a non-default IngressClass outside of the + component that will be using it +- Update `echo-server` to use the default IngressClass and not specify any configuration that affects other Ingresses, + and remove dependence on `alb-controller-ingress-group` (which should be deprecated in favor of + `alb-controller-ingress-class` and perhaps a specialized future `alb-controller-ingress`) +- Update `cert-manager` to remove `default.auto.tfvars` (which had a lot of settings) and add dependencies so that + initial deployment succeeds in one `terraform apply` and destroy works in one `terraform destroy` +- Update `external-dns` to remove `default.auto.tfvars` (which had a lot of settings) +- Update `karpenter` to v0.18.0, fix/update IAM policy (README still needs work, but leaving that for another day) +- Update `karpenter-provisioner` to require Terraform 1.3 and make elements of the Provisioner configuration optional. + Support block device mappings (previously broken). Avoid perpetual Terraform plan diff/drift caused by setting fields + to `null`. +- Update `reloader` +- Update `mixins/provider-helm` to better support `terraform destroy` and to default the Kubernetes client + authentication API version to `client.authentication.k8s.io/v1beta1` + +### references + +- https://github.com/cloudposse/terraform-aws-helm-release/pull/34 +- https://github.com/cloudposse/terraform-aws-eks-cluster/pull/169 +- https://github.com/cloudposse/terraform-yaml-stack-config/pull/56 +- https://github.com/hashicorp/terraform/issues/32023 + +
+ +## 1.85.0 (2022-10-18T00:05:19Z) + +
+ Upstream `github-runners` @milldr (#508) + +### what + +- Minor TLC updates for GitHub Runners ASG component + +### why + +- Maintaining up-to-date upstream + +
+ +## 1.84.0 (2022-10-12T22:49:28Z) + +
+ Fix feature allowing IAM users to assume team roles @Nuru (#507) + +### what + +- Replace `deny_all_iam_users` input with `iam_users_enabled` +- Fix implementation +- Provide more context for `bats` test failures + +### why + +- Cloud Posse style guide dictates that boolean feature flags have names ending with `_enabled` +- Previous implementation only removed 1 of 2 policy provisions that blocked IAM users from assuming a role, and + therefore IAM users were still not allowed to assume a role. Since the previous implementation did not work, a + breaking change (changing the variable name) does not need major warnings or a major version bump. +- Indication of what was being tested was too far removed from `bats` test failure message to be able to easily identify + what module had failed + +### notes + +Currently, any component provisioned by SuperAdmin needs to have a special provider configuration that requires +SuperAdmin to provision the component. This feature is part of what is needed to enable SuperAdmin (an IAM User) to work +with "normal" provider configurations. + +### references + +- Breaks change introduced in #495, but that didn't work anyway. + +
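+A hedged sketch of toggling the renamed flag from a stack manifest (which component exposes the flag is an
+assumption here; the flag name itself comes from this change):
+
+```yaml
+components:
+  terraform:
+    aws-team-roles: # assumed component; `iam_users_enabled` replaces `deny_all_iam_users`
+      vars:
+        iam_users_enabled: true # allow IAM users (e.g. SuperAdmin) to assume team roles
+```
+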
diff --git a/Dockerfile b/Dockerfile index 8f7e1a98f..9063ba7f7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,3 +1,3 @@ FROM scratch -COPY aws/ /aws -WORKDIR /aws +COPY modules/ /modules +WORKDIR /modules diff --git a/LICENSE b/LICENSE index 4bd1946f1..861ef1854 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2018-2022 Cloud Posse, LLC + Copyright 2018-2024 Cloud Posse, LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/README.md b/README.md index d7094ba3b..d86d9554e 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,11 @@ + -# terraform-aws-components [![Latest Release](https://img.shields.io/github/release/cloudposse/terraform-aws-components.svg)](https://github.com/cloudposse/terraform-aws-components/releases/latest) [![Slack Community](https://slack.cloudposse.com/badge.svg)](https://slack.cloudposse.com) +Project Banner
+

+Latest ReleaseLast UpdateSlack Community

-[![README Header][readme_header_img]][readme_header_link] - -[![Cloud Posse][logo]](https://cpco.io/homepage) - -This is a collection of reusable Terraform components and blueprints for provisioning reference architectures. - ---- - -This project is part of our comprehensive ["SweetOps"](https://cpco.io/sweetops) approach towards DevOps. -[][share_email] -[][share_googleplus] -[][share_facebook] -[][share_reddit] -[][share_linkedin] -[][share_twitter] - - - - -It's 100% Open Source and licensed under the [APACHE2](LICENSE). - - - - +This is a collection of reusable [AWS Terraform components](https://atmos.tools/core-concepts/components/) for provisioning infrastructure used by the Cloud Posse [reference architectures](https://cloudposse.com). +They work really well with [Atmos](https://atmos.tools), our open-source tool for managing infrastructure as code with Terraform. +> [!TIP] +> #### πŸ‘½ Use Atmos with Terraform +> Cloud Posse uses [`atmos`](https://atmos.tools) to easily orchestrate multiple environments using Terraform.
+> Works with [Github Actions](https://atmos.tools/integrations/github-actions/), [Atlantis](https://atmos.tools/integrations/atlantis), or [Spacelift](https://atmos.tools/integrations/spacelift). +> +>
+> Watch demo of using Atmos with Terraform +>
+> Example of running atmos to manage infrastructure from our Quick Start tutorial. +> +## Introduction +In this repo you'll find real-world examples of how we've implemented Terraform "root" modules as native +[Atmos Components](https://atmos.tools/core-concepts/components/) for our customers. These Components +leverage our hundreds of free and open-source [terraform "child" modules](https://cpco.io/terraform-modules). +The [component library](https://docs.cloudposse.com/components/) captures the business logic, opinions, best practices and +non-functional requirements for an organization. +It's from this library that other developers in your organization will pick and choose from whenever they need to deploy some new +capability. -## Introduction +These components make a lot of assumptions (aka ["convention over configuration"](https://en.wikipedia.org/wiki/Convention_over_configuration)) about how we've configured our environments. +That said, they still serve as an excellent reference for others on how to build, organize and distribute enterprise-grade infrastructure +with Terraform that can be used with [Atmos](https://atmos.tools). -In this repo you'll find real-world examples of how we've implemented various common patterns using our [terraform modules](https://cpco.io/terraform-modules) for our customers. -The component catalog captures the business logic, opinions, best practices and non-functional requirements. -It's from this catalog that other developers in your organization will pick and choose from anytime they need to deploy some new capability. -These components make a lot of assumptions about how we've configured our environments. That said, they can still serve as an excellent reference for others. +## Usage -## Deprecations -Terraform components which are no longer actively maintained are now in the `deprecated/` folder. +Please take a look at each [component's README](https://docs.cloudposse.com/components/) for specific usage. + +> [!TIP] +> ## πŸ‘½ Use Atmos with Terraform +> To orchestrate multiple environments with ease using Terraform, Cloud Posse recommends using [Atmos](https://atmos.tools), +> our open-source tool for Terraform automation. +> +>
+> Watch demo of using Atmos with Terraform +>
+> Example of running atmos to manage infrastructure from our Quick Start tutorial. +> + +Generally, you can use these components in [Atmos](https://atmos.tools/core-concepts/components/) by adding something like the following +code into your [stack manifest](https://atmos.tools/core-concepts/stacks/): + +```yaml +components: # List of components to include in the stack + terraform: # The toolchain being used for configuration + vpc: # The name of the component (e.g. terraform "root" module) + vars: # Terraform variables (e.g. `.tfvars`) + cidr_block: 10.0.0.0/16 # A variable input passed to terraform via `.tfvars` +``` -Many of these deprecated components are used in our old reference architectures. +## Automated Updates of Components using GitHub Actions + +Leverage our [GitHub Action](https://atmos.tools/integrations/github-actions/component-updater) to automate the creation and management of pull requests for component updates. + +This is done by creating a new file (e.g. `atmos-component-updater.yml`) in the `.github/workflows` directory of your repository. + +The file should contain the following: + +```yaml +jobs: +update: + runs-on: + - "ubuntu-latest" + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + with: + fetch-depth: 1 + + - name: Update Atmos Components + uses: cloudposse/github-action-atmos-component-updater@v2 + env: + # https://atmos.tools/cli/configuration/#environment-variables + ATMOS_CLI_CONFIG_PATH: ${{ github.workspace }}/rootfs/usr/local/etc/atmos/ + with: + github-access-token: ${{ secrets.GITHUB_TOKEN }} + log-level: INFO + max-number-of-prs: 10 + + - name: Delete abandoned update branches + uses: phpdocker-io/github-actions-delete-abandoned-branches@v2 + with: + github_token: ${{ github.token }} + last_commit_age_days: 0 + allowed_prefixes: "component-update/" + dry_run: no +``` -We intend to eventually delete, but are leaving them for now in the repo. +For the full documentation on how to use the Component Updater GitHub Action, please see the [Atmos Integrations](https://atmos.tools/integrations/github-actions/component-updater) documentation. ## Using `pre-commit` Hooks @@ -91,39 +146,22 @@ Then run the following command to rebuild the docs for all Terraform components: make rebuild-docs ``` +> [!IMPORTANT] +> ## Deprecated Components +> Terraform components which are no longer actively maintained are kept in the [`deprecated/`](deprecated/) folder. +> +> Many of these deprecated components are used in our older reference architectures. +> +> We intend to eventually delete, but are leaving them for now in the repo. +> [!IMPORTANT] +> In Cloud Posse's examples, we avoid pinning modules to specific versions to prevent discrepancies between the documentation +> and the latest released versions. However, for your own projects, we strongly advise pinning each module to the exact version +> you're using. This practice ensures the stability of your infrastructure. Additionally, we recommend implementing a systematic +> approach for updating versions to avoid unexpected changes. -## Usage - - - -See each component's README directory for usage. - -| Component | Description | -|-----------|-------------| -|[account](./modules/account) | Provisions the full account hierarchy along with Organizational Units (OUs). | -|[account-map](./modules/account-map) | Provisions information only: it simply populates Terraform state with data (account ids, groups, and roles) that other root modules need via outputs. 
| -|[account-settings](./modules/account-settings) | Provisions account level settings: IAM password policy, AWS Account Alias, and EBS encryption. | -|[cloudtrail](./modules/cloudtrail) | Provisions cloudtrail auditing in an individual account. | -|[cloudtrail-bucket](./modules/cloudtrail-bucket) | Provisions a bucket for storing cloudtrail logs for auditing purposes. | -|[datadog-integration](./modules/datadog-integration) | Provisions a DataDog <=> AWS integration. | -|[datadog-monitor](./modules/datadog-monitor) | Provisions global DataDog monitors. | -|[dms](./modules/dms) | Provisions AWS DMS resources: DMS IAM roles, DMS endpoints, DMS replication instances, DMS replication tasks. | -|[dns-delegated](./modules/dns-delegated) | Provisions a DNS zone which delegates nameservers to the DNS zone in the primary DNS account. | -|[dns-primary](./modules/dns-primary) | Provisions the primary DNS zones into an AWS account. | -|[ecr](./modules/ecr) | Provisions repositories, lifecycle rules, and permissions for streamlined ECR usage. | -|[efs](./modules/efs) | Provisions an [EFS](https://aws.amazon.com/efs/) Network File System with KMS encryption-at-rest. | -|[eks](./modules/eks) | Provisions an end-to-end EKS Cluster, including managed node groups and [spotinst ocean](https://spot.io/products/ocean/) node pools. | -|[eks-iam](./modules/eks-iam) | Provisions specific [IAM roles for Kubernetes Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html). | -|[iam-delegated-roles](./modules/iam-delegated-roles) | Provisions all delegated user and system IAM roles. | -|[iam-primary-roles](./modules/iam-primary-roles) | Provisions all primary user and system roles into the centralized identity account. | -|[sso](./modules/sso) | Provisions SAML metadata into AWS IAM as new SAML providers. | -|[tfstate-backend](./modules/tfstate-backend) | Provisions an S3 Bucket and DynamoDB table that follow security best practices for usage as a Terraform backend. | -|[transit-gateway](./modules/transit-gateway) | Provisions an AWS Transit Gateway to connect various account separated VPCs through a central hub. | -|[vpc](./modules/vpc) | Provisions a VPC and corresponing Subnets. | - @@ -143,51 +181,14 @@ Available targets: ``` - -## Requirements - -No requirements. - -## Providers - -No providers. - -## Modules - -No modules. - -## Resources - -No resources. - -## Inputs - -No inputs. - -## Outputs - -No outputs. - - - - -## Share the Love - -Like this project? Please give it a β˜… on [our GitHub](https://github.com/cloudposse/terraform-aws-components)! (it helps us **a lot**) - -Are you using this project or any of our other projects? Consider [leaving a testimonial][testimonial]. =) - ## Related Projects Check out these related projects. -- [reference-architectures](https://github.com/cloudposse/reference-architectures) - Get up and running quickly with one of our reference architecture using our fully automated cold-start process. -- [audit.cloudposse.co](https://github.com/cloudposse/audit.cloudposse.co) - Example Terraform Reference Architecture of a Geodesic Module for an Audit Logs Organization in AWS. -- [prod.cloudposse.co](https://github.com/cloudposse/prod.cloudposse.co) - Example Terraform Reference Architecture of a Geodesic Module for a Production Organization in AWS. -- [staging.cloudposse.co](https://github.com/cloudposse/staging.cloudposse.co) - Example Terraform Reference Architecture of a Geodesic Module for a Staging Organization in AWS. 
-- [dev.cloudposse.co](https://github.com/cloudposse/dev.cloudposse.co) - Example Terraform Reference Architecture of a Geodesic Module for a Development Sandbox Organization in AWS. +- [Cloud Posse Terraform Modules](https://docs.cloudposse.com/modules/) - Our collection of reusable Terraform modules used by our reference architectures. +- [Atmos](https://atmos.tools) - Atmos is like docker-compose but for your infrastructure ## References @@ -195,88 +196,97 @@ Check out these related projects. For additional context, refer to some of these links. - [Cloud Posse Documentation](https://docs.cloudposse.com) - Complete documentation for the Cloud Posse solution - - -## Help - -**Got a question?** We got answers. - -File a GitHub [issue](https://github.com/cloudposse/terraform-aws-components/issues), send us an [email][email] or join our [Slack Community][slack]. - -[![README Commercial Support][readme_commercial_support_img]][readme_commercial_support_link] - -## DevOps Accelerator for Startups - - -We are a [**DevOps Accelerator**][commercial_support]. We'll help you build your cloud infrastructure from the ground up so you can own it. Then we'll show you how to operate it and stick around for as long as you need us. - -[![Learn More](https://img.shields.io/badge/learn%20more-success.svg?style=for-the-badge)][commercial_support] - -Work directly with our team of DevOps experts via email, slack, and video conferencing. - -We deliver 10x the value for a fraction of the cost of a full-time engineer. Our track record is not even funny. If you want things done right and you need it done FAST, then we're your best bet. - -- **Reference Architecture.** You'll get everything you need from the ground up built using 100% infrastructure as code. -- **Release Engineering.** You'll have end-to-end CI/CD with unlimited staging environments. -- **Site Reliability Engineering.** You'll have total visibility into your apps and microservices. -- **Security Baseline.** You'll have built-in governance with accountability and audit logs for all changes. -- **GitOps.** You'll be able to operate your infrastructure via Pull Requests. -- **Training.** You'll receive hands-on training so your team can operate what we build. -- **Questions.** You'll have a direct line of communication between our teams via a Shared Slack channel. -- **Troubleshooting.** You'll get help to triage when things aren't working. -- **Code Reviews.** You'll receive constructive feedback on Pull Requests. -- **Bug Fixes.** We'll rapidly work with you to fix any bugs in our projects. - -## Slack Community - -Join our [Open Source Community][slack] on Slack. It's **FREE** for everyone! Our "SweetOps" community is where you get to talk with others who share a similar vision for how to rollout and manage infrastructure. This is the best place to talk shop, ask questions, solicit feedback, and work together as a community to build totally *sweet* infrastructure. - -## Discourse Forums - -Participate in our [Discourse Forums][discourse]. Here you'll find answers to commonly asked questions. Most questions will be related to the enormous number of projects we support on our GitHub. Come here to collaborate on answers, find solutions, and get ideas about the products and services we value. It only takes a minute to get started! Just sign in with SSO using your GitHub account. - -## Newsletter - -Sign up for [our newsletter][newsletter] that covers everything on our technology radar. 
Receive updates on what we're up to on GitHub as well as awesome new projects we discover. - -## Office Hours - -[Join us every Wednesday via Zoom][office_hours] for our weekly "Lunch & Learn" sessions. It's **FREE** for everyone! - -[![zoom](https://img.cloudposse.com/fit-in/200x200/https://cloudposse.com/wp-content/uploads/2019/08/Powered-by-Zoom.png")][office_hours] - -## Contributing - -### Bug Reports & Feature Requests - -Please use the [issue tracker](https://github.com/cloudposse/terraform-aws-components/issues) to report any bugs or file feature requests. - -### Developing - -If you are interested in being a contributor and want to get involved in developing this project or [help out](https://cpco.io/help-out) with our other projects, we would love to hear from you! Shoot us an [email][email]. +- [Reference Architectures](https://cloudposse.com/) - Launch effortlessly with our turnkey reference architectures, built either by your team or ours. + + + +> [!TIP] +> #### Use Terraform Reference Architectures for AWS +> +> Use Cloud Posse's ready-to-go [terraform architecture blueprints](https://cloudposse.com/reference-architecture/) for AWS to get up and running quickly. +> +> βœ… We build it together with your team.
+> βœ… Your team owns everything.
+> βœ… 100% Open Source and backed by fanatical support.
+> +> Request Quote +>
πŸ“š Learn More +> +>
+
+> Cloud Posse is the leading [**DevOps Accelerator**](https://cpco.io/commercial-support?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=commercial_support) for funded startups and enterprises.
+>
+> *Your team can operate like a pro today.*
+>
+> Ensure that your team succeeds by using Cloud Posse's proven process and turnkey blueprints. Plus, we stick around until you succeed.
+> #### Day-0: Your Foundation for Success
+> - **Reference Architecture.** You'll get everything you need from the ground up built using 100% infrastructure as code.
+> - **Deployment Strategy.** Adopt a proven deployment strategy with GitHub Actions, enabling automated, repeatable, and reliable software releases.
+> - **Site Reliability Engineering.** Gain total visibility into your applications and services with Datadog, ensuring high availability and performance.
+> - **Security Baseline.** Establish a secure environment from the start, with built-in governance, accountability, and comprehensive audit logs, safeguarding your operations.
+> - **GitOps.** Empower your team to manage infrastructure changes confidently and efficiently through Pull Requests, leveraging the full power of GitHub Actions.
+>
+> Request Quote
+>
+> #### Day-2: Your Operational Mastery
+> - **Training.** Equip your team with the knowledge and skills to confidently manage the infrastructure, ensuring long-term success and self-sufficiency.
+> - **Support.** Benefit from seamless communication over Slack with our experts, ensuring you have the support you need, whenever you need it.
+> - **Troubleshooting.** Access expert assistance to quickly resolve any operational challenges, minimizing downtime and maintaining business continuity.
+> - **Code Reviews.** Enhance your team’s code quality with our expert feedback, fostering continuous improvement and collaboration.
+> - **Bug Fixes.** Rely on our team to troubleshoot and resolve any issues, ensuring your systems run smoothly.
+> - **Migration Assistance.** Accelerate your migration process with our dedicated support, minimizing disruption and speeding up time-to-value.
+> - **Customer Workshops.** Engage with our team in weekly workshops, gaining insights and strategies to continuously improve and innovate.
+>
+> Request Quote
+>
+ +## ✨ Contributing + +This project is under active development, and we encourage contributions from our community. + + + +Many thanks to our outstanding contributors: + + + + + +For πŸ› bug reports & feature requests, please use the [issue tracker](https://github.com/cloudposse/terraform-aws-components/issues). In general, PRs are welcome. We follow the typical "fork-and-pull" Git workflow. - - 1. **Fork** the repo on GitHub - 2. **Clone** the project to your own machine - 3. **Commit** changes to your own branch - 4. **Push** your work back up to your fork - 5. Submit a **Pull Request** so that we can review your changes + 1. Review our [Code of Conduct](https://github.com/cloudposse/terraform-aws-components/?tab=coc-ov-file#code-of-conduct) and [Contributor Guidelines](https://github.com/cloudposse/.github/blob/main/CONTRIBUTING.md). + 2. **Fork** the repo on GitHub + 3. **Clone** the project to your own machine + 4. **Commit** changes to your own branch + 5. **Push** your work back up to your fork + 6. Submit a **Pull Request** so that we can review your changes **NOTE:** Be sure to merge the latest changes from "upstream" before making a pull request! +### 🌎 Slack Community -## Copyright +Join our [Open Source Community](https://cpco.io/slack?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=slack) on Slack. It's **FREE** for everyone! Our "SweetOps" community is where you get to talk with others who share a similar vision for how to rollout and manage infrastructure. This is the best place to talk shop, ask questions, solicit feedback, and work together as a community to build totally *sweet* infrastructure. -Copyright Β© 2017-2022 [Cloud Posse, LLC](https://cpco.io/copyright) +### πŸ“° Newsletter +Sign up for [our newsletter](https://cpco.io/newsletter?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=newsletter) and join 3,000+ DevOps engineers, CTOs, and founders who get insider access to the latest DevOps trends, so you can always stay in the know. +Dropped straight into your Inbox every week β€” and usually a 5-minute read. +### πŸ“† Office Hours +[Join us every Wednesday via Zoom](https://cloudposse.com/office-hours?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=office_hours) for your weekly dose of insider DevOps trends, AWS news and Terraform insights, all sourced from our SweetOps community, plus a _live Q&A_ that you can’t find anywhere else. +It's **FREE** for everyone! ## License -[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +License + +
+Preamble to the Apache License, Version 2.0 +
+
-See [LICENSE](LICENSE) for full details. +Complete license is available in the [`LICENSE`](LICENSE) file. ```text Licensed to the Apache Software Foundation (ASF) under one @@ -296,82 +306,17 @@ KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ``` - - - - - - - - +
## Trademarks All other trademarks referenced herein are the property of their respective owners. -## About - -This project is maintained and funded by [Cloud Posse, LLC][website]. Like it? Please let us know by [leaving a testimonial][testimonial]! - -[![Cloud Posse][logo]][website] - -We're a [DevOps Professional Services][hire] company based in Los Angeles, CA. We ❀️ [Open Source Software][we_love_open_source]. - -We offer [paid support][commercial_support] on all of our projects. - -Check out [our other projects][github], [follow us on twitter][twitter], [apply for a job][jobs], or [hire us][hire] to help with your cloud strategy and implementation. - +--- +Copyright © 2017-2024 [Cloud Posse, LLC](https://cpco.io/copyright) -### Contributors - -| [![Erik Osterman][osterman_avatar]][osterman_homepage]
[Erik Osterman][osterman_homepage] | [![Igor Rodionov][goruha_avatar]][goruha_homepage]
[Igor Rodionov][goruha_homepage] | [![Andriy Knysh][aknysh_avatar]][aknysh_homepage]
[Andriy Knysh][aknysh_homepage] | [![Matt Gowie][Gowiem_avatar]][Gowiem_homepage]
[Matt Gowie][Gowiem_homepage] | [![Yonatan Koren][korenyoni_avatar]][korenyoni_homepage]
[Yonatan Koren][korenyoni_homepage] | -|---|---|---|---|---| - +README footer - [osterman_homepage]: https://github.com/osterman - [osterman_avatar]: https://img.cloudposse.com/150x150/https://github.com/osterman.png - [goruha_homepage]: https://github.com/goruha - [goruha_avatar]: https://img.cloudposse.com/150x150/https://github.com/goruha.png - [aknysh_homepage]: https://github.com/aknysh - [aknysh_avatar]: https://img.cloudposse.com/150x150/https://github.com/aknysh.png - [Gowiem_homepage]: https://github.com/Gowiem - [Gowiem_avatar]: https://img.cloudposse.com/150x150/https://github.com/Gowiem.png - [korenyoni_homepage]: https://github.com/korenyoni - [korenyoni_avatar]: https://img.cloudposse.com/150x150/https://github.com/korenyoni.png - -[![README Footer][readme_footer_img]][readme_footer_link] -[![Beacon][beacon]][website] - - [logo]: https://cloudposse.com/logo-300x69.svg - [docs]: https://cpco.io/docs?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=docs - [website]: https://cpco.io/homepage?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=website - [github]: https://cpco.io/github?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=github - [jobs]: https://cpco.io/jobs?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=jobs - [hire]: https://cpco.io/hire?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=hire - [slack]: https://cpco.io/slack?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=slack - [linkedin]: https://cpco.io/linkedin?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=linkedin - [twitter]: https://cpco.io/twitter?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=twitter - [testimonial]: https://cpco.io/leave-testimonial?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=testimonial - [office_hours]: https://cloudposse.com/office-hours?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=office_hours - [newsletter]: https://cpco.io/newsletter?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=newsletter - [discourse]: https://ask.sweetops.com/?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=discourse - [email]: https://cpco.io/email?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=email - [commercial_support]: https://cpco.io/commercial-support?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=commercial_support - [we_love_open_source]: https://cpco.io/we-love-open-source?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=we_love_open_source - [terraform_modules]: https://cpco.io/terraform-modules?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=terraform_modules - [readme_header_img]: https://cloudposse.com/readme/header/img - [readme_header_link]: https://cloudposse.com/readme/header/link?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=readme_header_link - [readme_footer_img]: 
https://cloudposse.com/readme/footer/img - [readme_footer_link]: https://cloudposse.com/readme/footer/link?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=readme_footer_link - [readme_commercial_support_img]: https://cloudposse.com/readme/commercial-support/img - [readme_commercial_support_link]: https://cloudposse.com/readme/commercial-support/link?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-components&utm_content=readme_commercial_support_link - [share_twitter]: https://twitter.com/intent/tweet/?text=terraform-aws-components&url=https://github.com/cloudposse/terraform-aws-components - [share_linkedin]: https://www.linkedin.com/shareArticle?mini=true&title=terraform-aws-components&url=https://github.com/cloudposse/terraform-aws-components - [share_reddit]: https://reddit.com/submit/?url=https://github.com/cloudposse/terraform-aws-components - [share_facebook]: https://facebook.com/sharer/sharer.php?u=https://github.com/cloudposse/terraform-aws-components - [share_googleplus]: https://plus.google.com/share?url=https://github.com/cloudposse/terraform-aws-components - [share_email]: mailto:?subject=terraform-aws-components&body=https://github.com/cloudposse/terraform-aws-components - [beacon]: https://ga-beacon.cloudposse.com/UA-76589703-4/cloudposse/terraform-aws-components?pixel&cs=github&cm=readme&an=terraform-aws-components - +Beacon diff --git a/README.yaml b/README.yaml index 81d5cc503..e264b3b75 100644 --- a/README.yaml +++ b/README.yaml @@ -36,54 +36,118 @@ github_repo: "cloudposse/terraform-aws-components" # Badges to display badges: - name: "Latest Release" - image: "https://img.shields.io/github/release/cloudposse/terraform-aws-components.svg" + image: "https://img.shields.io/github/release/cloudposse/terraform-aws-components.svg?style=for-the-badge" url: "https://github.com/cloudposse/terraform-aws-components/releases/latest" + - name: "Last Update" + image: https://img.shields.io/github/last-commit/cloudposse/terraform-aws-components/main?style=for-the-badge + url: https://github.com/cloudposse/terraform-aws-components/commits/main/ - name: "Slack Community" - image: "https://slack.cloudposse.com/badge.svg" + image: "https://slack.cloudposse.com/for-the-badge.svg" url: "https://slack.cloudposse.com" references: - name: "Cloud Posse Documentation" description: "Complete documentation for the Cloud Posse solution" url: "https://docs.cloudposse.com" + - name: "Reference Architectures" + description: "Launch effortlessly with our turnkey reference architectures, built either by your team or ours." + url: "https://cloudposse.com/" related: - - name: "reference-architectures" - description: "Get up and running quickly with one of our reference architecture using our fully automated cold-start process." - url: "https://github.com/cloudposse/reference-architectures" - - name: "audit.cloudposse.co" - description: "Example Terraform Reference Architecture of a Geodesic Module for an Audit Logs Organization in AWS." - url: "https://github.com/cloudposse/audit.cloudposse.co" - - name: "prod.cloudposse.co" - description: "Example Terraform Reference Architecture of a Geodesic Module for a Production Organization in AWS." - url: "https://github.com/cloudposse/prod.cloudposse.co" - - name: "staging.cloudposse.co" - description: "Example Terraform Reference Architecture of a Geodesic Module for a Staging Organization in AWS." 
- url: "https://github.com/cloudposse/staging.cloudposse.co" - - name: "dev.cloudposse.co" - description: "Example Terraform Reference Architecture of a Geodesic Module for a Development Sandbox Organization in AWS." - url: "https://github.com/cloudposse/dev.cloudposse.co" +- name: "Cloud Posse Terraform Modules" + description: Our collection of reusable Terraform modules used by our reference architectures. + url: "https://docs.cloudposse.com/modules/" +- name: "Atmos" + description: "Atmos is like docker-compose but for your infrastructure" + url: "https://atmos.tools" + # Short description of this project description: |- - This is a collection of reusable Terraform components and blueprints for provisioning reference architectures. + This is a collection of reusable [AWS Terraform components](https://atmos.tools/core-concepts/components/) for provisioning infrastructure used by the Cloud Posse [reference architectures](https://cloudposse.com). + They work really well with [Atmos](https://atmos.tools), our open-source tool for managing infrastructure as code with Terraform. introduction: |- - In this repo you'll find real-world examples of how we've implemented various common patterns using our [terraform modules](https://cpco.io/terraform-modules) for our customers. + In this repo you'll find real-world examples of how we've implemented Terraform "root" modules as native + [Atmos Components](https://atmos.tools/core-concepts/components/) for our customers. These Components + leverage our hundreds of free and open-source [terraform "child" modules](https://cpco.io/terraform-modules). - The component catalog captures the business logic, opinions, best practices and non-functional requirements. + The [component library](https://docs.cloudposse.com/components/) captures the business logic, opinions, best practices and + non-functional requirements for an organization. - It's from this catalog that other developers in your organization will pick and choose from anytime they need to deploy some new capability. + It's from this library that other developers in your organization will pick and choose from whenever they need to deploy some new + capability. - These components make a lot of assumptions about how we've configured our environments. That said, they can still serve as an excellent reference for others. + These components make a lot of assumptions (aka ["convention over configuration"](https://en.wikipedia.org/wiki/Convention_over_configuration)) about how we've configured our environments. + That said, they still serve as an excellent reference for others on how to build, organize and distribute enterprise-grade infrastructure + with Terraform that can be used with [Atmos](https://atmos.tools). - ## Deprecations +# How to use this project +usage: |- - Terraform components which are no longer actively maintained are now in the `deprecated/` folder. + Please take a look at each [component's README](https://docs.cloudposse.com/components/) for specific usage. + + > [!TIP] + > ## πŸ‘½ Use Atmos with Terraform + > To orchestrate multiple environments with ease using Terraform, Cloud Posse recommends using [Atmos](https://atmos.tools), + > our open-source tool for Terraform automation. + > + >
+ > Watch demo of using Atmos with Terraform + >
+ > Example of running atmos to manage infrastructure from our Quick Start tutorial. + > + + Generally, you can use these components in [Atmos](https://atmos.tools/core-concepts/components/) by adding something like the following + code into your [stack manifest](https://atmos.tools/core-concepts/stacks/): + + ```yaml + components: # List of components to include in the stack + terraform: # The toolchain being used for configuration + vpc: # The name of the component (e.g. terraform "root" module) + vars: # Terraform variables (e.g. `.tfvars`) + cidr_block: 10.0.0.0/16 # A variable input passed to terraform via `.tfvars` + ``` - Many of these deprecated components are used in our old reference architectures. + ## Automated Updates of Components using GitHub Actions + + Leverage our [GitHub Action](https://atmos.tools/integrations/github-actions/component-updater) to automate the creation and management of pull requests for component updates. + + This is done by creating a new file (e.g. `atmos-component-updater.yml`) in the `.github/workflows` directory of your repository. + + The file should contain the following: + + ```yaml + jobs: + update: + runs-on: + - "ubuntu-latest" + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + with: + fetch-depth: 1 + + - name: Update Atmos Components + uses: cloudposse/github-action-atmos-component-updater@v2 + env: + # https://atmos.tools/cli/configuration/#environment-variables + ATMOS_CLI_CONFIG_PATH: ${{ github.workspace }}/rootfs/usr/local/etc/atmos/ + with: + github-access-token: ${{ secrets.GITHUB_TOKEN }} + log-level: INFO + max-number-of-prs: 10 + + - name: Delete abandoned update branches + uses: phpdocker-io/github-actions-delete-abandoned-branches@v2 + with: + github_token: ${{ github.token }} + last_commit_age_days: 0 + allowed_prefixes: "component-update/" + dry_run: no + ``` - We intend to eventually delete, but are leaving them for now in the repo. + For the full documentation on how to use the Component Updater GitHub Action, please see the [Atmos Integrations](https://atmos.tools/integrations/github-actions/component-updater) documentation. ## Using `pre-commit` Hooks @@ -102,46 +166,16 @@ introduction: |- make rebuild-docs ``` -# How to use this project -usage: |- - See each component's README directory for usage. - - | Component | Description | - |-----------|-------------| - |[account](./modules/account) | Provisions the full account hierarchy along with Organizational Units (OUs). | - |[account-map](./modules/account-map) | Provisions information only: it simply populates Terraform state with data (account ids, groups, and roles) that other root modules need via outputs. | - |[account-settings](./modules/account-settings) | Provisions account level settings: IAM password policy, AWS Account Alias, and EBS encryption. | - |[cloudtrail](./modules/cloudtrail) | Provisions cloudtrail auditing in an individual account. | - |[cloudtrail-bucket](./modules/cloudtrail-bucket) | Provisions a bucket for storing cloudtrail logs for auditing purposes. | - |[datadog-integration](./modules/datadog-integration) | Provisions a DataDog <=> AWS integration. | - |[datadog-monitor](./modules/datadog-monitor) | Provisions global DataDog monitors. | - |[dms](./modules/dms) | Provisions AWS DMS resources: DMS IAM roles, DMS endpoints, DMS replication instances, DMS replication tasks. | - |[dns-delegated](./modules/dns-delegated) | Provisions a DNS zone which delegates nameservers to the DNS zone in the primary DNS account. 
| - |[dns-primary](./modules/dns-primary) | Provisions the primary DNS zones into an AWS account. | - |[ecr](./modules/ecr) | Provisions repositories, lifecycle rules, and permissions for streamlined ECR usage. | - |[efs](./modules/efs) | Provisions an [EFS](https://aws.amazon.com/efs/) Network File System with KMS encryption-at-rest. | - |[eks](./modules/eks) | Provisions an end-to-end EKS Cluster, including managed node groups and [spotinst ocean](https://spot.io/products/ocean/) node pools. | - |[eks-iam](./modules/eks-iam) | Provisions specific [IAM roles for Kubernetes Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html). | - |[iam-delegated-roles](./modules/iam-delegated-roles) | Provisions all delegated user and system IAM roles. | - |[iam-primary-roles](./modules/iam-primary-roles) | Provisions all primary user and system roles into the centralized identity account. | - |[sso](./modules/sso) | Provisions SAML metadata into AWS IAM as new SAML providers. | - |[tfstate-backend](./modules/tfstate-backend) | Provisions an S3 Bucket and DynamoDB table that follow security best practices for usage as a Terraform backend. | - |[transit-gateway](./modules/transit-gateway) | Provisions an AWS Transit Gateway to connect various account separated VPCs through a central hub. | - |[vpc](./modules/vpc) | Provisions a VPC and corresponing Subnets. | + > [!IMPORTANT] + > ## Deprecated Components + > Terraform components which are no longer actively maintained are kept in the [`deprecated/`](deprecated/) folder. + > + > Many of these deprecated components are used in our older reference architectures. + > + > We intend to eventually delete, but are leaving them for now in the repo. include: - "docs/targets.md" - - "docs/terraform.md" # Contributors to this project -contributors: - - name: "Erik Osterman" - github: "osterman" - - name: "Igor Rodionov" - github: "goruha" - - name: "Andriy Knysh" - github: "aknysh" - - name: "Matt Gowie" - github: "Gowiem" - - name: "Yonatan Koren" - github: "korenyoni" +contributors: [] diff --git a/deprecated/account-map/modules/iam-roles/README.md b/deprecated/account-map/modules/iam-roles/README.md index 984c9beaa..0de665565 100644 --- a/deprecated/account-map/modules/iam-roles/README.md +++ b/deprecated/account-map/modules/iam-roles/README.md @@ -2,13 +2,13 @@ This submodule is used by other modules to determine which IAM Roles or AWS CLI Config Profiles to use for various tasks, most commonly -for applying Terraform plans. +for applying Terraform plans. ## Special Configuration Needed In order to avoid having to pass customization information through every module that uses this submodule, if the default configuration does not suit your needs, -you are expected to customize `variables.tf` with the defaults you want to +you are expected to customize `variables.tf` with the defaults you want to use in your project. 
For example, if you are including the `tenant` label in the designation of your "root" account (your Organization Management Account), then you should modify `variables.tf` so that `global_tenant_name` defaults diff --git a/deprecated/account-map/modules/roles-to-principals/README.md b/deprecated/account-map/modules/roles-to-principals/README.md index a24094074..82b128d8c 100644 --- a/deprecated/account-map/modules/roles-to-principals/README.md +++ b/deprecated/account-map/modules/roles-to-principals/README.md @@ -1,15 +1,15 @@ # Submodule `roles-to-principals` -This submodule is used by other modules to map short role names and AWS +This submodule is used by other modules to map short role names and AWS SSO Permission Set names in accounts designated by short account names (for example, `terraform` in the `dev` account) to full IAM Role ARNs and -other related tasks. +other related tasks. ## Special Configuration Needed In order to avoid having to pass customization information through every module that uses this submodule, if the default configuration does not suit your needs, -you are expected to customize `variables.tf` with the defaults you want to +you are expected to customize `variables.tf` with the defaults you want to use in your project. For example, if you are including the `tenant` label in the designation of your "root" account (your Organization Management Account), then you should modify `variables.tf` so that `global_tenant_name` defaults diff --git a/modules/aws-waf-acl/README.md b/deprecated/aws-waf-acl/README.md similarity index 98% rename from modules/aws-waf-acl/README.md rename to deprecated/aws-waf-acl/README.md index 607db25a0..64e50e47e 100644 --- a/modules/aws-waf-acl/README.md +++ b/deprecated/aws-waf-acl/README.md @@ -39,18 +39,18 @@ components: | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | ~> 0.14.9 | -| [aws](#requirement\_aws) | ~> 3.36 | -| [external](#requirement\_external) | ~> 2.1 | -| [local](#requirement\_local) | ~> 2.1 | -| [template](#requirement\_template) | ~> 2.2 | -| [utils](#requirement\_utils) | ~> 0.3 | +| [terraform](#requirement\_terraform) | >= 0.14.9 | +| [aws](#requirement\_aws) | >= 3.36 | +| [external](#requirement\_external) | >= 2.1 | +| [local](#requirement\_local) | >= 2.1 | +| [template](#requirement\_template) | >= 2.2 | +| [utils](#requirement\_utils) | >= 0.3 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 3.36 | +| [aws](#provider\_aws) | >= 3.36 | ## Modules @@ -83,7 +83,6 @@ components: | [environment](#input\_environment) | Environment, e.g. 'uw2', 'us-west-2', OR 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [geo\_match\_statement\_rules](#input\_geo\_match\_statement\_rules) | A rule statement used to identify web requests based on country of origin.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

statement:
country\_codes:
A list of two-character country codes.
forwarded\_ip\_config:
fallback\_behavior:
The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.
Possible values: `MATCH`, `NO_MATCH`
header\_name:
The name of the HTTP header to use for the IP address.

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. | `list(any)` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for default, which is `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | | [ip\_set\_reference\_statement\_rules](#input\_ip\_set\_reference\_statement\_rules) | A rule statement used to detect web requests coming from particular IP addresses or address ranges.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

statement:
arn:
The ARN of the IP Set that this statement references.
ip\_set\_forwarded\_ip\_config:
fallback\_behavior:
The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.
Possible values: `MATCH`, `NO_MATCH`
header\_name:
The name of the HTTP header to use for the IP address.
position:
The position in the header to search for the IP address.
Possible values include: `FIRST`, `LAST`, or `ANY`.

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. | `list(any)` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | The letter case of label keys (`tag` names) (i.e. `name`, `namespace`, `environment`, `stage`, `attributes`) to use in `tags`.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The naming order of the id output and Name tag.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 5 elements, but at least one must be present. | `list(string)` | `null` | no | diff --git a/modules/aws-waf-acl/context.tf b/deprecated/aws-waf-acl/context.tf similarity index 100% rename from modules/aws-waf-acl/context.tf rename to deprecated/aws-waf-acl/context.tf diff --git a/modules/aws-waf-acl/main.tf b/deprecated/aws-waf-acl/main.tf similarity index 100% rename from modules/aws-waf-acl/main.tf rename to deprecated/aws-waf-acl/main.tf diff --git a/modules/aws-waf-acl/outputs.tf b/deprecated/aws-waf-acl/outputs.tf similarity index 100% rename from modules/aws-waf-acl/outputs.tf rename to deprecated/aws-waf-acl/outputs.tf diff --git a/deprecated/aws-waf-acl/providers.tf b/deprecated/aws-waf-acl/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/deprecated/aws-waf-acl/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/aws-waf-acl/variables.tf b/deprecated/aws-waf-acl/variables.tf similarity index 100% rename from modules/aws-waf-acl/variables.tf rename to deprecated/aws-waf-acl/variables.tf diff --git a/modules/aws-waf-acl/versions.tf b/deprecated/aws-waf-acl/versions.tf similarity index 59% rename from modules/aws-waf-acl/versions.tf rename to deprecated/aws-waf-acl/versions.tf index 838c22d35..4da0d958d 100644 --- a/modules/aws-waf-acl/versions.tf +++ b/deprecated/aws-waf-acl/versions.tf @@ -1,26 +1,26 @@ terraform { - required_version = "~> 0.14.9" + required_version = ">= 0.14.9" required_providers { aws = { source = "hashicorp/aws" - version = "~> 3.36" + version = ">= 3.36" } external = { source = "hashicorp/external" - version = "~> 2.1" + version = ">= 2.1" } template = { - source = "hashicorp/template" - version = "~> 2.2" + source = "cloudposse/template" + version = ">= 2.2" } local = { source = "hashicorp/local" - version = "~> 2.1" + version = ">= 2.1" } utils = { source = "cloudposse/utils" - version = "~> 0.3" + version = ">= 0.3" } } } diff --git a/deprecated/aws/account-dns/main.tf b/deprecated/aws/account-dns/main.tf index e0593acf7..2cfbba207 100644 --- a/deprecated/aws/account-dns/main.tf +++ b/deprecated/aws/account-dns/main.tf @@ -5,28 +5,28 @@ terraform { } variable "aws_assume_role_arn" { - type = "string" + type = string } variable "domain_name" { - type = "string" + type = string description = "Domain name" } provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } resource "aws_route53_zone" "dns_zone" { - name = "${var.domain_name}" + name = var.domain_name } resource "aws_route53_record" "dns_zone_soa" { allow_overwrite = true - zone_id = "${aws_route53_zone.dns_zone.id}" - name = "${aws_route53_zone.dns_zone.name}" + zone_id = aws_route53_zone.dns_zone.id + name = aws_route53_zone.dns_zone.name type = "SOA" ttl = "60" @@ -36,9 +36,9 @@ resource "aws_route53_record" "dns_zone_soa" { } output "zone_id" { - value = "${aws_route53_zone.dns_zone.zone_id}" + value = aws_route53_zone.dns_zone.zone_id } output 
"name_servers" { - value = "${aws_route53_zone.dns_zone.name_servers}" + value = aws_route53_zone.dns_zone.name_servers } diff --git a/deprecated/aws/account-settings/main.tf b/deprecated/aws/account-settings/main.tf index fe8b0e093..b9f077900 100644 --- a/deprecated/aws/account-settings/main.tf +++ b/deprecated/aws/account-settings/main.tf @@ -6,16 +6,16 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } module "account_settings" { source = "git::https://github.com/cloudposse/terraform-aws-iam-account-settings.git?ref=tags/0.1.0" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" - enabled = "${var.enabled}" + namespace = var.namespace + stage = var.stage + name = var.name + enabled = var.enabled - minimum_password_length = "${var.minimum_password_length}" + minimum_password_length = var.minimum_password_length } diff --git a/deprecated/aws/account-settings/outputs.tf b/deprecated/aws/account-settings/outputs.tf index 2cf493c1d..c6066c2a6 100644 --- a/deprecated/aws/account-settings/outputs.tf +++ b/deprecated/aws/account-settings/outputs.tf @@ -1,11 +1,11 @@ output "account_alias" { - value = "${module.account_settings.account_alias}" + value = module.account_settings.account_alias } output "minimum_password_length" { - value = "${module.account_settings.minimum_password_length}" + value = module.account_settings.minimum_password_length } output "signin_url" { - value = "${module.account_settings.signin_url}" + value = module.account_settings.signin_url } diff --git a/deprecated/aws/account-settings/variables.tf b/deprecated/aws/account-settings/variables.tf index cd2917577..a41e9aba4 100644 --- a/deprecated/aws/account-settings/variables.tf +++ b/deprecated/aws/account-settings/variables.tf @@ -1,5 +1,5 @@ variable "minimum_password_length" { - type = "string" + type = string description = "Minimum number of characters allowed in an IAM user password. Integer between 6 and 128, per https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html" ## Same default as https://github.com/cloudposse/terraform-aws-iam-account-settings: @@ -7,21 +7,21 @@ variable "minimum_password_length" { } variable "aws_assume_role_arn" { - type = "string" + type = string } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } variable "name" { - type = "string" + type = string description = "Application or solution name (e.g. 
`app`)" default = "account" } diff --git a/deprecated/aws/accounts/audit.tf b/deprecated/aws/accounts/audit.tf index d4622aa94..826f0bc74 100644 --- a/deprecated/aws/accounts/audit.tf +++ b/deprecated/aws/accounts/audit.tf @@ -1,21 +1,21 @@ module "audit" { source = "stage" - namespace = "${var.namespace}" + namespace = var.namespace stage = "audit" - accounts_enabled = "${var.accounts_enabled}" - account_email = "${var.account_email}" - account_iam_user_access_to_billing = "${var.account_iam_user_access_to_billing}" - account_role_name = "${var.account_role_name}" + accounts_enabled = var.accounts_enabled + account_email = var.account_email + account_iam_user_access_to_billing = var.account_iam_user_access_to_billing + account_role_name = var.account_role_name } output "audit_account_arn" { - value = "${module.audit.account_arn}" + value = module.audit.account_arn } output "audit_account_id" { - value = "${module.audit.account_id}" + value = module.audit.account_id } output "audit_organization_account_access_role" { - value = "${module.audit.organization_account_access_role}" + value = module.audit.organization_account_access_role } diff --git a/deprecated/aws/accounts/corp.tf b/deprecated/aws/accounts/corp.tf index fdff28f83..39875f236 100644 --- a/deprecated/aws/accounts/corp.tf +++ b/deprecated/aws/accounts/corp.tf @@ -1,21 +1,21 @@ module "corp" { source = "stage" - namespace = "${var.namespace}" + namespace = var.namespace stage = "corp" - accounts_enabled = "${var.accounts_enabled}" - account_email = "${var.account_email}" - account_iam_user_access_to_billing = "${var.account_iam_user_access_to_billing}" - account_role_name = "${var.account_role_name}" + accounts_enabled = var.accounts_enabled + account_email = var.account_email + account_iam_user_access_to_billing = var.account_iam_user_access_to_billing + account_role_name = var.account_role_name } output "corp_account_arn" { - value = "${module.corp.account_arn}" + value = module.corp.account_arn } output "corp_account_id" { - value = "${module.corp.account_id}" + value = module.corp.account_id } output "corp_organization_account_access_role" { - value = "${module.corp.organization_account_access_role}" + value = module.corp.organization_account_access_role } diff --git a/deprecated/aws/accounts/data.tf b/deprecated/aws/accounts/data.tf index e252d8380..707e25188 100644 --- a/deprecated/aws/accounts/data.tf +++ b/deprecated/aws/accounts/data.tf @@ -1,21 +1,21 @@ module "data" { source = "stage" - namespace = "${var.namespace}" + namespace = var.namespace stage = "data" - accounts_enabled = "${var.accounts_enabled}" - account_email = "${var.account_email}" - account_iam_user_access_to_billing = "${var.account_iam_user_access_to_billing}" - account_role_name = "${var.account_role_name}" + accounts_enabled = var.accounts_enabled + account_email = var.account_email + account_iam_user_access_to_billing = var.account_iam_user_access_to_billing + account_role_name = var.account_role_name } output "data_account_arn" { - value = "${module.data.account_arn}" + value = module.data.account_arn } output "data_account_id" { - value = "${module.data.account_id}" + value = module.data.account_id } output "data_organization_account_access_role" { - value = "${module.data.organization_account_access_role}" + value = module.data.organization_account_access_role } diff --git a/deprecated/aws/accounts/dev.tf b/deprecated/aws/accounts/dev.tf index ef8127b81..d2748d997 100644 --- a/deprecated/aws/accounts/dev.tf +++ b/deprecated/aws/accounts/dev.tf @@ 
-1,21 +1,21 @@ module "dev" { source = "stage" - namespace = "${var.namespace}" + namespace = var.namespace stage = "dev" - accounts_enabled = "${var.accounts_enabled}" - account_email = "${var.account_email}" - account_iam_user_access_to_billing = "${var.account_iam_user_access_to_billing}" - account_role_name = "${var.account_role_name}" + accounts_enabled = var.accounts_enabled + account_email = var.account_email + account_iam_user_access_to_billing = var.account_iam_user_access_to_billing + account_role_name = var.account_role_name } output "dev_account_arn" { - value = "${module.dev.account_arn}" + value = module.dev.account_arn } output "dev_account_id" { - value = "${module.dev.account_id}" + value = module.dev.account_id } output "dev_organization_account_access_role" { - value = "${module.dev.organization_account_access_role}" + value = module.dev.organization_account_access_role } diff --git a/deprecated/aws/accounts/identity.tf b/deprecated/aws/accounts/identity.tf index d0c22578b..0a250eba3 100644 --- a/deprecated/aws/accounts/identity.tf +++ b/deprecated/aws/accounts/identity.tf @@ -1,21 +1,21 @@ module "identity" { source = "stage" - namespace = "${var.namespace}" + namespace = var.namespace stage = "identity" - accounts_enabled = "${var.accounts_enabled}" - account_email = "${var.account_email}" - account_iam_user_access_to_billing = "${var.account_iam_user_access_to_billing}" - account_role_name = "${var.account_role_name}" + accounts_enabled = var.accounts_enabled + account_email = var.account_email + account_iam_user_access_to_billing = var.account_iam_user_access_to_billing + account_role_name = var.account_role_name } output "identity_account_arn" { - value = "${module.identity.account_arn}" + value = module.identity.account_arn } output "identity_account_id" { - value = "${module.identity.account_id}" + value = module.identity.account_id } output "identity_organization_account_access_role" { - value = "${module.identity.organization_account_access_role}" + value = module.identity.organization_account_access_role } diff --git a/deprecated/aws/accounts/main.tf b/deprecated/aws/accounts/main.tf index 50e8b0e91..4cd1dd90a 100644 --- a/deprecated/aws/accounts/main.tf +++ b/deprecated/aws/accounts/main.tf @@ -5,39 +5,39 @@ terraform { } variable "aws_assume_role_arn" { - type = "string" + type = string } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "account_role_name" { - type = "string" + type = string description = "IAM role that Organization automatically preconfigures in the new member account" default = "OrganizationAccountAccessRole" } variable "account_email" { - type = "string" + type = string description = "Email address format for accounts (e.g. `%s@cloudposse.co`)" } variable "account_iam_user_access_to_billing" { - type = "string" + type = string description = "If set to `ALLOW`, the new account enables IAM users to access account billing information if they have the required permissions. 
If set to `DENY`, then only the root user of the new account can access account billing information" default = "DENY" } variable "accounts_enabled" { - type = "list" + type = list(string) description = "Accounts to enable" default = ["dev", "staging", "prod", "testing", "audit"] } provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } diff --git a/deprecated/aws/accounts/prod.tf b/deprecated/aws/accounts/prod.tf index e0f86f5a2..06e7c2bd3 100644 --- a/deprecated/aws/accounts/prod.tf +++ b/deprecated/aws/accounts/prod.tf @@ -1,21 +1,21 @@ module "prod" { source = "stage" - namespace = "${var.namespace}" + namespace = var.namespace stage = "prod" - accounts_enabled = "${var.accounts_enabled}" - account_email = "${var.account_email}" - account_iam_user_access_to_billing = "${var.account_iam_user_access_to_billing}" - account_role_name = "${var.account_role_name}" + accounts_enabled = var.accounts_enabled + account_email = var.account_email + account_iam_user_access_to_billing = var.account_iam_user_access_to_billing + account_role_name = var.account_role_name } output "prod_account_arn" { - value = "${module.prod.account_arn}" + value = module.prod.account_arn } output "prod_account_id" { - value = "${module.prod.account_id}" + value = module.prod.account_id } output "prod_organization_account_access_role" { - value = "${module.prod.organization_account_access_role}" + value = module.prod.organization_account_access_role } diff --git a/deprecated/aws/accounts/security.tf b/deprecated/aws/accounts/security.tf index ffe42ba1f..bb44d49fc 100644 --- a/deprecated/aws/accounts/security.tf +++ b/deprecated/aws/accounts/security.tf @@ -1,21 +1,21 @@ module "security" { source = "stage" - namespace = "${var.namespace}" + namespace = var.namespace stage = "security" - accounts_enabled = "${var.accounts_enabled}" - account_email = "${var.account_email}" - account_iam_user_access_to_billing = "${var.account_iam_user_access_to_billing}" - account_role_name = "${var.account_role_name}" + accounts_enabled = var.accounts_enabled + account_email = var.account_email + account_iam_user_access_to_billing = var.account_iam_user_access_to_billing + account_role_name = var.account_role_name } output "security_account_arn" { - value = "${module.security.account_arn}" + value = module.security.account_arn } output "security_account_id" { - value = "${module.security.account_id}" + value = module.security.account_id } output "security_organization_account_access_role" { - value = "${module.security.organization_account_access_role}" + value = module.security.organization_account_access_role } diff --git a/deprecated/aws/accounts/stage/main.tf b/deprecated/aws/accounts/stage/main.tf index 1cf66ff1a..3f8914dad 100644 --- a/deprecated/aws/accounts/stage/main.tf +++ b/deprecated/aws/accounts/stage/main.tf @@ -1,41 +1,41 @@ resource "aws_organizations_account" "default" { - count = "${local.count}" - name = "${var.stage}" - email = "${format(var.account_email, var.stage)}" - iam_user_access_to_billing = "${var.account_iam_user_access_to_billing}" - role_name = "${var.account_role_name}" + count = local.count + name = var.stage + email = format(var.account_email, var.stage) + iam_user_access_to_billing = var.account_iam_user_access_to_billing + role_name = var.account_role_name } locals { - count = "${contains(var.accounts_enabled, var.stage) == true ? 
1 : 0}" - account_arn = "${join("", aws_organizations_account.default.*.arn)}" - account_id = "${join("", aws_organizations_account.default.*.id)}" + count = contains(var.accounts_enabled, var.stage) == true ? 1 : 0 + account_arn = join("", aws_organizations_account.default.*.arn) + account_id = join("", aws_organizations_account.default.*.id) organization_account_access_role = "arn:aws:iam::${join("", aws_organizations_account.default.*.id)}:role/OrganizationAccountAccessRole" } resource "aws_ssm_parameter" "account_id" { - count = "${local.count}" + count = local.count name = "/${var.namespace}/${var.stage}/account_id" description = "AWS Account ID" type = "String" - value = "${local.account_id}" + value = local.account_id overwrite = "true" } resource "aws_ssm_parameter" "account_arn" { - count = "${local.count}" + count = local.count name = "/${var.namespace}/${var.stage}/account_arn" description = "AWS Account ARN" type = "String" - value = "${local.account_arn}" + value = local.account_arn overwrite = "true" } resource "aws_ssm_parameter" "organization_account_access_role" { - count = "${local.count}" + count = local.count name = "/${var.namespace}/${var.stage}/organization_account_access_role" description = "AWS Organization Account Access Role" type = "String" - value = "${local.organization_account_access_role}" + value = local.organization_account_access_role overwrite = "true" } diff --git a/deprecated/aws/accounts/stage/outputs.tf b/deprecated/aws/accounts/stage/outputs.tf index b37a2b9f9..31e4ab19e 100644 --- a/deprecated/aws/accounts/stage/outputs.tf +++ b/deprecated/aws/accounts/stage/outputs.tf @@ -1,11 +1,11 @@ output "account_arn" { - value = "${local.account_arn}" + value = local.account_arn } output "account_id" { - value = "${local.account_id}" + value = local.account_id } output "organization_account_access_role" { - value = "${local.organization_account_access_role}" + value = local.organization_account_access_role } diff --git a/deprecated/aws/accounts/stage/variables.tf b/deprecated/aws/accounts/stage/variables.tf index 35f50545f..d984e6225 100644 --- a/deprecated/aws/accounts/stage/variables.tf +++ b/deprecated/aws/accounts/stage/variables.tf @@ -1,29 +1,29 @@ variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `audit`)" } variable "account_role_name" { - type = "string" + type = string description = "IAM role that Organization automatically preconfigures in the new member account" } variable "account_email" { - type = "string" + type = string description = "Email address format for accounts (e.g. `%s@cloudposse.co`)" } variable "account_iam_user_access_to_billing" { - type = "string" + type = string description = "If set to `ALLOW`, the new account enables IAM users to access account billing information if they have the required permissions. 
If set to `DENY`, then only the root user of the new account can access account billing information" } variable "accounts_enabled" { - type = "list" + type = list(string) description = "Accounts to enable" } diff --git a/deprecated/aws/accounts/staging.tf b/deprecated/aws/accounts/staging.tf index d2d78c97c..76f348531 100644 --- a/deprecated/aws/accounts/staging.tf +++ b/deprecated/aws/accounts/staging.tf @@ -1,21 +1,21 @@ module "staging" { source = "stage" - namespace = "${var.namespace}" + namespace = var.namespace stage = "staging" - accounts_enabled = "${var.accounts_enabled}" - account_email = "${var.account_email}" - account_iam_user_access_to_billing = "${var.account_iam_user_access_to_billing}" - account_role_name = "${var.account_role_name}" + accounts_enabled = var.accounts_enabled + account_email = var.account_email + account_iam_user_access_to_billing = var.account_iam_user_access_to_billing + account_role_name = var.account_role_name } output "staging_account_arn" { - value = "${module.staging.account_arn}" + value = module.staging.account_arn } output "staging_account_id" { - value = "${module.staging.account_id}" + value = module.staging.account_id } output "staging_organization_account_access_role" { - value = "${module.staging.organization_account_access_role}" + value = module.staging.organization_account_access_role } diff --git a/deprecated/aws/accounts/testing.tf b/deprecated/aws/accounts/testing.tf index 89ba2e589..f250253aa 100644 --- a/deprecated/aws/accounts/testing.tf +++ b/deprecated/aws/accounts/testing.tf @@ -1,21 +1,21 @@ module "testing" { source = "stage" - namespace = "${var.namespace}" + namespace = var.namespace stage = "testing" - accounts_enabled = "${var.accounts_enabled}" - account_email = "${var.account_email}" - account_iam_user_access_to_billing = "${var.account_iam_user_access_to_billing}" - account_role_name = "${var.account_role_name}" + accounts_enabled = var.accounts_enabled + account_email = var.account_email + account_iam_user_access_to_billing = var.account_iam_user_access_to_billing + account_role_name = var.account_role_name } output "testing_account_arn" { - value = "${module.testing.account_arn}" + value = module.testing.account_arn } output "testing_account_id" { - value = "${module.testing.account_id}" + value = module.testing.account_id } output "testing_organization_account_access_role" { - value = "${module.testing.organization_account_access_role}" + value = module.testing.organization_account_access_role } diff --git a/deprecated/aws/acm-cloudfront/main.tf b/deprecated/aws/acm-cloudfront/main.tf index ca9a32b5f..86e9d9546 100644 --- a/deprecated/aws/acm-cloudfront/main.tf +++ b/deprecated/aws/acm-cloudfront/main.tf @@ -5,7 +5,7 @@ terraform { } variable "aws_assume_role_arn" { - type = "string" + type = string } provider "aws" { @@ -17,7 +17,7 @@ provider "aws" { region = "us-east-1" assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } @@ -27,24 +27,24 @@ variable "domain_name" { module "certificate" { source = "git::https://github.com/cloudposse/terraform-aws-acm-request-certificate.git?ref=tags/0.1.1" - domain_name = "${var.domain_name}" + domain_name = var.domain_name proces_domain_validation_options = "true" ttl = "300" subject_alternative_names = ["*.${var.domain_name}"] } output "certificate_domain_name" { - value = "${var.domain_name}" + value = var.domain_name } output "certificate_id" { - value = "${module.certificate.id}" + value = module.certificate.id } output 
"certificate_arn" { - value = "${module.certificate.arn}" + value = module.certificate.arn } output "certificate_domain_validation_options" { - value = "${module.certificate.domain_validation_options}" + value = module.certificate.domain_validation_options } diff --git a/deprecated/aws/acm-teleport/main.tf b/deprecated/aws/acm-teleport/main.tf index aa3ebfbf1..f21fbe6da 100644 --- a/deprecated/aws/acm-teleport/main.tf +++ b/deprecated/aws/acm-teleport/main.tf @@ -8,21 +8,21 @@ variable "aws_assume_role_arn" {} provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } module "certificate" { source = "git::https://github.com/cloudposse/terraform-aws-acm-request-certificate.git?ref=tags/0.1.1" - domain_name = "${var.domain_name}" + domain_name = var.domain_name proces_domain_validation_options = "true" ttl = "300" subject_alternative_names = ["*.${var.domain_name}"] } resource "aws_ssm_parameter" "certificate_arn_parameter" { - name = "${format(var.chamber_parameter_name, var.chamber_service, var.certificate_arn_parameter_name)}" - value = "${module.certificate.arn}" + name = format(var.chamber_parameter_name, var.chamber_service, var.certificate_arn_parameter_name) + value = module.certificate.arn description = "Teleport ACM-issued TLS Certificate AWS ARN" type = "String" overwrite = "true" diff --git a/deprecated/aws/acm-teleport/outputs.tf b/deprecated/aws/acm-teleport/outputs.tf index 1b6994fcb..5ba8c2564 100644 --- a/deprecated/aws/acm-teleport/outputs.tf +++ b/deprecated/aws/acm-teleport/outputs.tf @@ -1,15 +1,15 @@ output "certificate_domain_name" { - value = "${var.domain_name}" + value = var.domain_name } output "certificate_id" { - value = "${module.certificate.id}" + value = module.certificate.id } output "certificate_arn" { - value = "${module.certificate.arn}" + value = module.certificate.arn } output "certificate_domain_validation_options" { - value = "${module.certificate.domain_validation_options}" + value = module.certificate.domain_validation_options } diff --git a/deprecated/aws/acm/variables.tf b/deprecated/aws/acm/variables.tf index 0b76b56f9..19985903b 100644 --- a/deprecated/aws/acm/variables.tf +++ b/deprecated/aws/acm/variables.tf @@ -24,4 +24,3 @@ variable "chamber_parameter_name_format" { description = "Format string for combining `chamber` service name and parameter name. It is rare to need to set this." 
default = "/%s/%s" } - diff --git a/deprecated/aws/artifacts/main.tf b/deprecated/aws/artifacts/main.tf index bee2adc84..6fe5c7261 100644 --- a/deprecated/aws/artifacts/main.tf +++ b/deprecated/aws/artifacts/main.tf @@ -9,14 +9,14 @@ provider "aws" { region = "us-east-1" assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } # https://www.terraform.io/artifacts/providers/aws/d/acm_certificate.html data "aws_acm_certificate" "acm_cloudfront_certificate" { provider = "aws.virginia" - domain = "${var.domain_name}" + domain = var.domain_name statuses = ["ISSUED"] types = ["AMAZON_ISSUED"] } @@ -29,19 +29,19 @@ locals { module "artifacts_user" { source = "git::https://github.com/cloudposse/terraform-aws-iam-system-user.git?ref=tags/0.2.2" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${local.name}" + namespace = var.namespace + stage = var.stage + name = local.name } module "origin" { source = "git::https://github.com/cloudposse/terraform-aws-s3-website.git?ref=tags/0.5.2" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${local.name}" - hostname = "${local.cdn_domain}" - parent_zone_name = "${var.domain_name}" - region = "${var.region}" + namespace = var.namespace + stage = var.stage + name = local.name + hostname = local.cdn_domain + parent_zone_name = var.domain_name + region = var.region cors_allowed_headers = ["*"] cors_allowed_methods = ["GET"] cors_allowed_origins = ["*"] @@ -66,14 +66,14 @@ module "origin" { # CloudFront CDN fronting origin module "cdn" { source = "git::https://github.com/cloudposse/terraform-aws-cloudfront-cdn.git?ref=tags/0.5.7" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${local.name}" + namespace = var.namespace + stage = var.stage + name = local.name aliases = ["${local.cdn_domain}", "artifacts.cloudposse.com"] - origin_domain_name = "${module.origin.s3_bucket_website_endpoint}" + origin_domain_name = module.origin.s3_bucket_website_endpoint origin_protocol_policy = "http-only" viewer_protocol_policy = "redirect-to-https" - parent_zone_name = "${var.domain_name}" + parent_zone_name = var.domain_name forward_cookies = "none" forward_headers = ["Origin", "Access-Control-Request-Headers", "Access-Control-Request-Method"] default_ttl = 60 @@ -84,5 +84,5 @@ module "cdn" { allowed_methods = ["GET", "HEAD", "OPTIONS"] price_class = "PriceClass_All" default_root_object = "index.html" - acm_certificate_arn = "${data.aws_acm_certificate.acm_cloudfront_certificate.arn}" + acm_certificate_arn = data.aws_acm_certificate.acm_cloudfront_certificate.arn } diff --git a/deprecated/aws/artifacts/outputs.tf b/deprecated/aws/artifacts/outputs.tf index 17b6f495e..9c8816058 100644 --- a/deprecated/aws/artifacts/outputs.tf +++ b/deprecated/aws/artifacts/outputs.tf @@ -1,94 +1,94 @@ output "artifacts_user_name" { - value = "${module.artifacts_user.user_name}" + value = module.artifacts_user.user_name description = "Normalized IAM user name" } output "artifacts_user_arn" { - value = "${module.artifacts_user.user_arn}" + value = module.artifacts_user.user_arn description = "The ARN assigned by AWS for the user" } output "artifacts_user_unique_id" { - value = "${module.artifacts_user.user_unique_id}" + value = module.artifacts_user.user_unique_id description = "The user unique ID assigned by AWS" } output "artifacts_user_access_key_id" { - value = "${module.artifacts_user.access_key_id}" + value = module.artifacts_user.access_key_id description = "The access key ID" } output 
"artifacts_user_secret_access_key" { - value = "${module.artifacts_user.secret_access_key}" + value = module.artifacts_user.secret_access_key description = "The secret access key. This will be written to the state file in plain-text" } output "artifacts_s3_bucket_name" { - value = "${module.origin.s3_bucket_name}" + value = module.origin.s3_bucket_name description = "The S3 bucket which serves as the origin for the CDN and S3 website" } output "artifacts_s3_bucket_domain_name" { - value = "${module.origin.s3_bucket_domain_name}" + value = module.origin.s3_bucket_domain_name description = "The bucket domain name. Will be of format bucketname.s3.amazonaws.com." } output "artifacts_s3_bucket_arn" { - value = "${module.origin.s3_bucket_arn}" + value = module.origin.s3_bucket_arn description = "The ARN of the bucket. Will be of format arn:aws:s3:::bucketname." } output "artifacts_s3_bucket_website_endpoint" { - value = "${module.origin.s3_bucket_website_endpoint}" + value = module.origin.s3_bucket_website_endpoint description = "The website endpoint, if the bucket is configured with a website. If not, this will be an empty string." } output "artifacts_s3_bucket_website_domain" { - value = "${module.origin.s3_bucket_website_domain}" + value = module.origin.s3_bucket_website_domain description = "The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records." } output "artifacts_s3_bucket_hosted_zone_id" { - value = "${module.origin.s3_bucket_hosted_zone_id}" + value = module.origin.s3_bucket_hosted_zone_id description = "The Route 53 Hosted Zone ID for this bucket's region." } output "artifacts_cloudfront_id" { - value = "${module.cdn.cf_id}" + value = module.cdn.cf_id description = "The identifier for the distribution. For example: EDFDVBD632BHDS5." } output "artifacts_cloudfront_arn" { - value = "${module.cdn.cf_arn}" + value = module.cdn.cf_arn description = "The ARN (Amazon Resource Name) for the distribution. For example: arn:aws:cloudfront::123456789012:distribution/EDFDVBD632BHDS5, where 123456789012 is your AWS account ID." } output "artifacts_cloudfront_aliases" { - value = "${module.cdn.cf_aliases}" + value = module.cdn.cf_aliases description = "Extra CNAMEs (alternate domain names), if any, for this distribution." } output "artifacts_cloudfront_status" { - value = "${module.cdn.cf_status}" + value = module.cdn.cf_status description = "The current status of the distribution. Deployed if the distribution's information is fully propagated throughout the Amazon CloudFront system." } output "artifacts_cloudfront_domain_name" { - value = "${module.cdn.cf_domain_name}" + value = module.cdn.cf_domain_name description = "The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net." } output "artifacts_cloudfront_etag" { - value = "${module.cdn.cf_etag}" + value = module.cdn.cf_etag description = "The current version of the distribution's information. For example: E2QWRUHAPOMQZL." } output "artifacts_cloudfront_hosted_zone_id" { - value = "${module.cdn.cf_hosted_zone_id}" + value = module.cdn.cf_hosted_zone_id description = "The CloudFront Route 53 zone ID that can be used to route an Alias Resource Record Set to. This attribute is simply an alias for the zone ID Z2FDTNDATAQYW2." 
} output "artifacts_cloudfront_origin_access_identity_path" { - value = "${module.cdn.cf_origin_access_identity}" + value = module.cdn.cf_origin_access_identity description = "The CloudFront origin access identity to associate with the origin." } diff --git a/deprecated/aws/artifacts/variables.tf b/deprecated/aws/artifacts/variables.tf index 7d5d33392..c0c635312 100644 --- a/deprecated/aws/artifacts/variables.tf +++ b/deprecated/aws/artifacts/variables.tf @@ -1,28 +1,28 @@ variable "aws_assume_role_arn" { - type = "string" + type = string description = "The ARN of the role to assume" } variable "domain_name" { - type = "string" + type = string } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } variable "region" { - type = "string" + type = string description = "AWS region" } variable "aws_account_id" { - type = "string" + type = string description = "AWS account ID" } diff --git a/deprecated/aws/audit-cloudtrail/cloudwatch_logs.tf b/deprecated/aws/audit-cloudtrail/cloudwatch_logs.tf index a963ad9d6..23135b8df 100644 --- a/deprecated/aws/audit-cloudtrail/cloudwatch_logs.tf +++ b/deprecated/aws/audit-cloudtrail/cloudwatch_logs.tf @@ -1,11 +1,11 @@ module "logs" { source = "git::https://github.com/cloudposse/terraform-aws-cloudwatch-logs.git?ref=tags/0.3.0" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" + namespace = var.namespace + stage = var.stage + name = var.name attributes = ["cloudwatch", "logs"] - retention_in_days = "${var.cloudwatch_logs_retention_in_days}" + retention_in_days = var.cloudwatch_logs_retention_in_days principals = { Service = ["cloudtrail.amazonaws.com"] @@ -18,16 +18,16 @@ module "logs" { module "kms_key_logs" { source = "git::https://github.com/cloudposse/terraform-aws-kms-key.git?ref=tags/0.1.3" - namespace = "${var.namespace}" - name = "${var.name}" - stage = "${var.stage}" + namespace = var.namespace + name = var.name + stage = var.stage attributes = ["cloudwatch", "logs"] description = "KMS key for CloudWatch" deletion_window_in_days = 10 enable_key_rotation = "true" - policy = "${data.aws_iam_policy_document.kms_key_logs.json}" + policy = data.aws_iam_policy_document.kms_key_logs.json } data "aws_iam_policy_document" "kms_key_logs" { diff --git a/deprecated/aws/audit-cloudtrail/main.tf b/deprecated/aws/audit-cloudtrail/main.tf index 93847f9a5..6ac268b0e 100644 --- a/deprecated/aws/audit-cloudtrail/main.tf +++ b/deprecated/aws/audit-cloudtrail/main.tf @@ -6,7 +6,7 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } @@ -15,35 +15,35 @@ data "aws_caller_identity" "default" {} data "aws_region" "default" {} locals { - region = "${length(var.region) > 0 ? var.region : data.aws_region.default.name}" + region = length(var.region) > 0 ? 
var.region : data.aws_region.default.name } module "cloudtrail" { source = "git::https://github.com/cloudposse/terraform-aws-cloudtrail.git?ref=tags/0.7.1" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" + namespace = var.namespace + stage = var.stage + name = var.name enable_logging = "true" enable_log_file_validation = "true" include_global_service_events = "true" is_multi_region_trail = "true" - s3_bucket_name = "${module.cloudtrail_s3_bucket.bucket_id}" - kms_key_arn = "${module.kms_key_cloudtrail.alias_arn}" - cloud_watch_logs_group_arn = "${module.logs.log_group_arn}" - cloud_watch_logs_role_arn = "${module.logs.role_arn}" + s3_bucket_name = module.cloudtrail_s3_bucket.bucket_id + kms_key_arn = module.kms_key_cloudtrail.alias_arn + cloud_watch_logs_group_arn = module.logs.log_group_arn + cloud_watch_logs_role_arn = module.logs.role_arn } module "kms_key_cloudtrail" { source = "git::https://github.com/cloudposse/terraform-aws-kms-key.git?ref=tags/0.1.3" - namespace = "${var.namespace}" - name = "${var.name}" - stage = "${var.stage}" + namespace = var.namespace + name = var.name + stage = var.stage description = "KMS key for CloudTrail" deletion_window_in_days = 10 enable_key_rotation = "true" - policy = "${data.aws_iam_policy_document.kms_key_cloudtrail.json}" + policy = data.aws_iam_policy_document.kms_key_cloudtrail.json } data "aws_iam_policy_document" "kms_key_cloudtrail" { diff --git a/deprecated/aws/audit-cloudtrail/output.tf b/deprecated/aws/audit-cloudtrail/output.tf index 81ae5425f..74d69d677 100644 --- a/deprecated/aws/audit-cloudtrail/output.tf +++ b/deprecated/aws/audit-cloudtrail/output.tf @@ -1,15 +1,15 @@ output "cloudtrail_kms_key_arn" { - value = "${module.kms_key_cloudtrail.alias_arn}" + value = module.kms_key_cloudtrail.alias_arn } output "cloudtrail_bucket_domain_name" { - value = "${module.cloudtrail_s3_bucket.bucket_domain_name}" + value = module.cloudtrail_s3_bucket.bucket_domain_name } output "cloudtrail_bucket_id" { - value = "${module.cloudtrail_s3_bucket.bucket_id}" + value = module.cloudtrail_s3_bucket.bucket_id } output "cloudtrail_bucket_arn" { - value = "${module.cloudtrail_s3_bucket.bucket_arn}" + value = module.cloudtrail_s3_bucket.bucket_arn } diff --git a/deprecated/aws/audit-cloudtrail/s3_bucket.tf b/deprecated/aws/audit-cloudtrail/s3_bucket.tf index 9f781322e..ffe61e47a 100644 --- a/deprecated/aws/audit-cloudtrail/s3_bucket.tf +++ b/deprecated/aws/audit-cloudtrail/s3_bucket.tf @@ -1,21 +1,21 @@ module "cloudtrail_s3_bucket" { source = "git::https://github.com/cloudposse/terraform-aws-cloudtrail-s3-bucket.git?ref=tags/0.3.2" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" - region = "${local.region}" + namespace = var.namespace + stage = var.stage + name = var.name + region = local.region sse_algorithm = "aws:kms" - kms_master_key_arn = "${module.kms_key_s3_bucket.alias_arn}" + kms_master_key_arn = module.kms_key_s3_bucket.alias_arn access_logs_sse_algorithm = "aws:kms" - access_logs_kms_master_key_arn = "${module.kms_key_s3_bucket_logs.alias_arn}" + access_logs_kms_master_key_arn = module.kms_key_s3_bucket_logs.alias_arn } module "kms_key_s3_bucket" { source = "git::https://github.com/cloudposse/terraform-aws-kms-key.git?ref=tags/0.1.3" - namespace = "${var.namespace}" - name = "${var.name}" - stage = "${var.stage}" + namespace = var.namespace + name = var.name + stage = var.stage attributes = ["cloudtrail", "s3", "bucket"] @@ -26,9 +26,9 @@ module "kms_key_s3_bucket" { module 
"kms_key_s3_bucket_logs" { source = "git::https://github.com/cloudposse/terraform-aws-kms-key.git?ref=tags/0.1.3" - namespace = "${var.namespace}" - name = "${var.name}" - stage = "${var.stage}" + namespace = var.namespace + name = var.name + stage = var.stage attributes = ["cloudtrail", "s3", "bucket", "logs"] diff --git a/deprecated/aws/audit-cloudtrail/varaibles.tf b/deprecated/aws/audit-cloudtrail/varaibles.tf index cca94fffb..c3fc177be 100644 --- a/deprecated/aws/audit-cloudtrail/varaibles.tf +++ b/deprecated/aws/audit-cloudtrail/varaibles.tf @@ -1,26 +1,26 @@ variable "aws_assume_role_arn" { - type = "string" + type = string } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `audit`)" default = "audit" } variable "name" { - type = "string" + type = string description = "Name (e.g. `account`)" default = "account" } variable "region" { - type = "string" + type = string description = "AWS region" default = "" } diff --git a/deprecated/aws/aws-metrics-role/main.tf b/deprecated/aws/aws-metrics-role/main.tf index 361b38918..f9a6dd53f 100644 --- a/deprecated/aws/aws-metrics-role/main.tf +++ b/deprecated/aws/aws-metrics-role/main.tf @@ -57,8 +57,8 @@ data "aws_iam_policy_document" "assume_role" { } resource "aws_iam_role_policy_attachment" "default" { - role = "${aws_iam_role.default.name}" - policy_arn = "${aws_iam_policy.default.arn}" + role = aws_iam_role.default.name + policy_arn = aws_iam_policy.default.arn lifecycle { create_before_destroy = true @@ -66,9 +66,9 @@ resource "aws_iam_role_policy_attachment" "default" { } resource "aws_iam_policy" "default" { - name = "${module.label.id}" + name = module.label.id description = "Grant permissions for external-dns" - policy = "${data.aws_iam_policy_document.default.json}" + policy = data.aws_iam_policy_document.default.json } data "aws_iam_policy_document" "default" { diff --git a/deprecated/aws/aws-metrics-role/variables.tf b/deprecated/aws/aws-metrics-role/variables.tf index b55d3f4ce..5fd6eea75 100644 --- a/deprecated/aws/aws-metrics-role/variables.tf +++ b/deprecated/aws/aws-metrics-role/variables.tf @@ -43,7 +43,7 @@ variable "kops_cluster_name" { } variable "assume_role_permitted_roles" { - type = "string" + type = string description = "Roles that are permitted to assume thie role. One of 'kiam', 'nodes', 'masters', or 'both' (nodes + masters)." default = "kiam" } @@ -70,4 +70,3 @@ variable "max_session_duration" { default = 3600 description = "The maximum session duration (in seconds) for the role. Can have a value from 1 hour to 12 hours" } - diff --git a/deprecated/aws/aws-metrics-role/versions.tf b/deprecated/aws/aws-metrics-role/versions.tf index 2502a985d..9498a4a22 100644 --- a/deprecated/aws/aws-metrics-role/versions.tf +++ b/deprecated/aws/aws-metrics-role/versions.tf @@ -6,4 +6,3 @@ terraform { kubernetes = "~> 1.8" } } - diff --git a/deprecated/aws/backing-services/README.md b/deprecated/aws/backing-services/README.md index b51605ac8..a8f8a707e 100644 --- a/deprecated/aws/backing-services/README.md +++ b/deprecated/aws/backing-services/README.md @@ -7,6 +7,6 @@ aws_security_group.default: Error authorizing security group ingress rules: InvalidGroup.NotFound: You have specified two resources that belong to different networks. ``` -### Answer +### Answer Ensure that the VPC peering with the Kops cluster has been setup. 
diff --git a/deprecated/aws/backing-services/aurora-mysql.tf b/deprecated/aws/backing-services/aurora-mysql.tf index 67752a30b..95c167ae7 100644 --- a/deprecated/aws/backing-services/aurora-mysql.tf +++ b/deprecated/aws/backing-services/aurora-mysql.tf @@ -1,44 +1,44 @@ # https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html variable "mysql_name" { - type = "string" + type = string description = "Name of the application, e.g. `app` or `analytics`" default = "mysql" } variable "mysql_admin_user" { - type = "string" + type = string description = "MySQL admin user name" default = "" } variable "mysql_admin_password" { - type = "string" + type = string description = "MySQL password for the admin user" default = "" } variable "mysql_db_name" { - type = "string" + type = string description = "MySQL database name" default = "" } # https://aws.amazon.com/rds/aurora/pricing variable "mysql_instance_type" { - type = "string" + type = string default = "db.t2.small" description = "EC2 instance type for Aurora MySQL cluster" } variable "mysql_cluster_size" { - type = "string" + type = string default = "2" description = "MySQL cluster size" } variable "mysql_cluster_enabled" { - type = "string" + type = string default = "false" description = "Set to false to prevent the module from creating any resources" } @@ -49,134 +49,134 @@ variable "mysql_cluster_publicly_accessible" { } variable "mysql_cluster_allowed_cidr_blocks" { - type = "list" + type = list(string) default = ["0.0.0.0/0"] description = "List of CIDR blocks allowed to access the cluster" } resource "random_pet" "mysql_db_name" { - count = "${local.mysql_cluster_enabled ? 1 : 0}" + count = local.mysql_cluster_enabled ? 1 : 0 separator = "_" } resource "random_string" "mysql_admin_user" { - count = "${local.mysql_cluster_enabled ? 1 : 0}" + count = local.mysql_cluster_enabled ? 1 : 0 length = 8 number = false special = false } resource "random_string" "mysql_admin_password" { - count = "${local.mysql_cluster_enabled ? 1 : 0}" + count = local.mysql_cluster_enabled ? 1 : 0 length = 16 special = true } locals { - mysql_cluster_enabled = "${var.mysql_cluster_enabled == "true"}" - mysql_admin_user = "${length(var.mysql_admin_user) > 0 ? var.mysql_admin_user : join("", random_string.mysql_admin_user.*.result)}" - mysql_admin_password = "${length(var.mysql_admin_password) > 0 ? var.mysql_admin_password : join("", random_string.mysql_admin_password.*.result)}" - mysql_db_name = "${join("", random_pet.mysql_db_name.*.id)}" + mysql_cluster_enabled = var.mysql_cluster_enabled == "true" + mysql_admin_user = length(var.mysql_admin_user) > 0 ? var.mysql_admin_user : join("", random_string.mysql_admin_user.*.result) + mysql_admin_password = length(var.mysql_admin_password) > 0 ? 
var.mysql_admin_password : join("", random_string.mysql_admin_password.*.result) + mysql_db_name = join("", random_pet.mysql_db_name.*.id) } module "aurora_mysql" { source = "git::https://github.com/cloudposse/terraform-aws-rds-cluster.git?ref=tags/0.8.0" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.mysql_name}" + namespace = var.namespace + stage = var.stage + name = var.mysql_name engine = "aurora-mysql" cluster_family = "aurora-mysql5.7" - instance_type = "${var.mysql_instance_type}" - cluster_size = "${var.mysql_cluster_size}" - admin_user = "${local.mysql_admin_user}" - admin_password = "${local.mysql_admin_password}" - db_name = "${local.mysql_db_name}" + instance_type = var.mysql_instance_type + cluster_size = var.mysql_cluster_size + admin_user = local.mysql_admin_user + admin_password = local.mysql_admin_password + db_name = local.mysql_db_name db_port = "3306" - vpc_id = "${module.vpc.vpc_id}" + vpc_id = module.vpc.vpc_id # Use module.subnets.private_subnet_ids if the cluster does not need to be publicly accessible subnets = ["${module.subnets.public_subnet_ids}"] - zone_id = "${local.zone_id}" - enabled = "${var.mysql_cluster_enabled}" - publicly_accessible = "${var.mysql_cluster_publicly_accessible}" - allowed_cidr_blocks = "${var.mysql_cluster_allowed_cidr_blocks}" + zone_id = local.zone_id + enabled = var.mysql_cluster_enabled + publicly_accessible = var.mysql_cluster_publicly_accessible + allowed_cidr_blocks = var.mysql_cluster_allowed_cidr_blocks } resource "aws_ssm_parameter" "aurora_mysql_database_name" { - count = "${local.mysql_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "aurora_mysql_database_name")}" - value = "${module.aurora_mysql.name}" + count = local.mysql_cluster_enabled ? 1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "aurora_mysql_database_name") + value = module.aurora_mysql.name description = "Aurora MySQL Database Name" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "aurora_mysql_master_username" { - count = "${local.mysql_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "aurora_mysql_master_username")}" - value = "${module.aurora_mysql.user}" + count = local.mysql_cluster_enabled ? 1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "aurora_mysql_master_username") + value = module.aurora_mysql.user description = "Aurora MySQL Username for the master DB user" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "aurora_mysql_master_password" { - count = "${local.mysql_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "aurora_mysql_master_password")}" - value = "${module.aurora_mysql.password}" + count = local.mysql_cluster_enabled ? 1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "aurora_mysql_master_password") + value = module.aurora_mysql.password description = "Aurora MySQL Password for the master DB user" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "aurora_mysql_master_hostname" { - count = "${local.mysql_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "aurora_mysql_master_hostname")}" - value = "${module.aurora_mysql.master_host}" + count = local.mysql_cluster_enabled ? 
1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "aurora_mysql_master_hostname") + value = module.aurora_mysql.master_host description = "Aurora MySQL DB Master hostname" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "aurora_mysql_replicas_hostname" { - count = "${local.mysql_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "aurora_mysql_replicas_hostname")}" - value = "${module.aurora_mysql.replicas_host}" + count = local.mysql_cluster_enabled ? 1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "aurora_mysql_replicas_hostname") + value = module.aurora_mysql.replicas_host description = "Aurora MySQL DB Replicas hostname" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "aurora_mysql_cluster_name" { - count = "${local.mysql_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "aurora_mysql_cluster_name")}" - value = "${module.aurora_mysql.cluster_name}" + count = local.mysql_cluster_enabled ? 1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "aurora_mysql_cluster_name") + value = module.aurora_mysql.cluster_name description = "Aurora MySQL DB Cluster Identifier" type = "String" overwrite = "true" } output "aurora_mysql_database_name" { - value = "${module.aurora_mysql.name}" + value = module.aurora_mysql.name description = "Aurora MySQL Database name" } output "aurora_mysql_master_username" { - value = "${module.aurora_mysql.user}" + value = module.aurora_mysql.user description = "Aurora MySQL Username for the master DB user" } output "aurora_mysql_master_hostname" { - value = "${module.aurora_mysql.master_host}" + value = module.aurora_mysql.master_host description = "Aurora MySQL DB Master hostname" } output "aurora_mysql_replicas_hostname" { - value = "${module.aurora_mysql.replicas_host}" + value = module.aurora_mysql.replicas_host description = "Aurora MySQL Replicas hostname" } output "aurora_mysql_cluster_name" { - value = "${module.aurora_mysql.cluster_name}" + value = module.aurora_mysql.cluster_name description = "Aurora MySQL Cluster Identifier" } diff --git a/deprecated/aws/backing-services/aurora-postgres-replica.tf b/deprecated/aws/backing-services/aurora-postgres-replica.tf index 3657fee85..3c55c7b84 100644 --- a/deprecated/aws/backing-services/aurora-postgres-replica.tf +++ b/deprecated/aws/backing-services/aurora-postgres-replica.tf @@ -1,5 +1,5 @@ variable "postgres_replica_name" { - type = "string" + type = string description = "Name of the replica, e.g. 
`postgres` or `reporting`" default = "postgres" } @@ -7,75 +7,75 @@ variable "postgres_replica_name" { # db.r4.large is the smallest instance type supported by Aurora Postgres # https://aws.amazon.com/rds/aurora/pricing variable "postgres_replica_instance_type" { - type = "string" + type = string default = "db.r4.large" description = "EC2 instance type for Postgres cluster" } variable "postgres_replica_cluster_size" { - type = "string" + type = string default = "2" description = "Postgres cluster size" } variable "postgres_replica_enabled" { - type = "string" + type = string default = "false" description = "Set to false to prevent the module from creating any resources" } variable "postgres_replica_cluster_identifier" { - type = "string" + type = string description = "The cluster identifier" default = "" } locals { - postgres_replica_enabled = "${var.postgres_replica_enabled == "true"}" + postgres_replica_enabled = var.postgres_replica_enabled == "true" } module "postgres_replica" { source = "git::https://github.com/cloudposse/terraform-aws-rds-cluster-instance-group.git?ref=tags/0.1.0" - enabled = "${var.postgres_replica_enabled}" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.postgres_replica_name}" - cluster_identifier = "${var.postgres_replica_cluster_identifier}" + enabled = var.postgres_replica_enabled + namespace = var.namespace + stage = var.stage + name = var.postgres_replica_name + cluster_identifier = var.postgres_replica_cluster_identifier cluster_family = "aurora-postgresql9.6" engine = "aurora-postgresql" - instance_type = "${var.postgres_replica_instance_type}" - cluster_size = "${var.postgres_replica_cluster_size}" + instance_type = var.postgres_replica_instance_type + cluster_size = var.postgres_replica_cluster_size db_port = "5432" - vpc_id = "${module.vpc.vpc_id}" + vpc_id = module.vpc.vpc_id subnets = ["${module.subnets.private_subnet_ids}"] - zone_id = "${local.zone_id}" + zone_id = local.zone_id security_groups = ["${module.kops_metadata.nodes_security_group_id}"] } resource "aws_ssm_parameter" "postgres_replica_hostname" { - count = "${local.postgres_replica_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "postgres_replica_hostname")}" - value = "${module.postgres_replica.hostname}" + count = local.postgres_replica_enabled ? 1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "postgres_replica_hostname") + value = module.postgres_replica.hostname description = "RDS Cluster replica hostname" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "postgres_replica_endpoint" { - count = "${local.postgres_replica_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "postgres_replica_endpoint")}" - value = "${module.postgres_replica.endpoint}" + count = local.postgres_replica_enabled ? 
1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "postgres_replica_endpoint") + value = module.postgres_replica.endpoint description = "RDS Cluster Replicas hostname" type = "String" overwrite = "true" } output "postgres_replica_hostname" { - value = "${module.postgres_replica.hostname}" + value = module.postgres_replica.hostname description = "RDS Cluster replica hostname" } output "postgres_replica_endpoint" { - value = "${module.postgres_replica.endpoint}" + value = module.postgres_replica.endpoint description = "RDS Cluster replica endpoint" } diff --git a/deprecated/aws/backing-services/aurora-postgres.tf b/deprecated/aws/backing-services/aurora-postgres.tf index 39f2f7f45..e11d83fe3 100644 --- a/deprecated/aws/backing-services/aurora-postgres.tf +++ b/deprecated/aws/backing-services/aurora-postgres.tf @@ -1,14 +1,14 @@ variable "postgres_name" { - type = "string" + type = string description = "Name of the application, e.g. `app` or `analytics`" default = "postgres" } -# Don't use `admin` +# Don't use `admin` # Read more: # ("MasterUsername admin cannot be used as it is a reserved word used by the engine") variable "postgres_admin_user" { - type = "string" + type = string description = "Postgres admin user name" default = "" } @@ -17,13 +17,13 @@ variable "postgres_admin_user" { # Read more: # ("The parameter MasterUserPassword is not a valid password because it is shorter than 8 characters") variable "postgres_admin_password" { - type = "string" + type = string description = "Postgres password for the admin user" default = "" } variable "postgres_db_name" { - type = "string" + type = string description = "Postgres database name" default = "" } @@ -31,151 +31,151 @@ variable "postgres_db_name" { # db.r4.large is the smallest instance type supported by Aurora Postgres # https://aws.amazon.com/rds/aurora/pricing variable "postgres_instance_type" { - type = "string" + type = string default = "db.r4.large" description = "EC2 instance type for Postgres cluster" } variable "postgres_cluster_size" { - type = "string" + type = string default = "2" description = "Postgres cluster size" } variable "postgres_cluster_enabled" { - type = "string" + type = string default = "false" description = "Set to false to prevent the module from creating any resources" } variable "postgres_iam_database_authentication_enabled" { - type = "string" + type = string default = "false" description = "Specifies whether or mappings of AWS Identity and Access Management (IAM) accounts to database accounts is enabled." } resource "random_pet" "postgres_db_name" { - count = "${local.postgres_cluster_enabled ? 1 : 0}" + count = local.postgres_cluster_enabled ? 1 : 0 separator = "_" } resource "random_string" "postgres_admin_user" { - count = "${local.postgres_cluster_enabled ? 1 : 0}" + count = local.postgres_cluster_enabled ? 1 : 0 length = 8 special = false number = false } resource "random_string" "postgres_admin_password" { - count = "${local.postgres_cluster_enabled ? 1 : 0}" + count = local.postgres_cluster_enabled ? 1 : 0 length = 16 special = true } locals { - postgres_cluster_enabled = "${var.postgres_cluster_enabled == "true"}" - postgres_admin_user = "${length(var.postgres_admin_user) > 0 ? var.postgres_admin_user : join("", random_string.postgres_admin_user.*.result)}" - postgres_admin_password = "${length(var.postgres_admin_password) > 0 ? 
var.postgres_admin_password : join("", random_string.postgres_admin_password.*.result)}" - postgres_db_name = "${join("", random_pet.postgres_db_name.*.id)}" + postgres_cluster_enabled = var.postgres_cluster_enabled == "true" + postgres_admin_user = length(var.postgres_admin_user) > 0 ? var.postgres_admin_user : join("", random_string.postgres_admin_user.*.result) + postgres_admin_password = length(var.postgres_admin_password) > 0 ? var.postgres_admin_password : join("", random_string.postgres_admin_password.*.result) + postgres_db_name = join("", random_pet.postgres_db_name.*.id) } module "aurora_postgres" { source = "git::https://github.com/cloudposse/terraform-aws-rds-cluster.git?ref=tags/0.8.0" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.postgres_name}" + namespace = var.namespace + stage = var.stage + name = var.postgres_name engine = "aurora-postgresql" cluster_family = "aurora-postgresql9.6" - instance_type = "${var.postgres_instance_type}" - cluster_size = "${var.postgres_cluster_size}" - admin_user = "${local.postgres_admin_user}" - admin_password = "${local.postgres_admin_password}" - db_name = "${local.postgres_db_name}" + instance_type = var.postgres_instance_type + cluster_size = var.postgres_cluster_size + admin_user = local.postgres_admin_user + admin_password = local.postgres_admin_password + db_name = local.postgres_db_name db_port = "5432" - vpc_id = "${module.vpc.vpc_id}" + vpc_id = module.vpc.vpc_id subnets = ["${module.subnets.private_subnet_ids}"] - zone_id = "${local.zone_id}" + zone_id = local.zone_id security_groups = ["${module.kops_metadata.nodes_security_group_id}"] - enabled = "${var.postgres_cluster_enabled}" + enabled = var.postgres_cluster_enabled - iam_database_authentication_enabled = "${var.postgres_iam_database_authentication_enabled}" + iam_database_authentication_enabled = var.postgres_iam_database_authentication_enabled } resource "aws_ssm_parameter" "aurora_postgres_database_name" { - count = "${local.postgres_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "aurora_postgres_database_name")}" - value = "${module.aurora_postgres.name}" + count = local.postgres_cluster_enabled ? 1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "aurora_postgres_database_name") + value = module.aurora_postgres.name description = "Aurora Postgres Database Name" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "aurora_postgres_master_username" { - count = "${local.postgres_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "aurora_postgres_master_username")}" - value = "${module.aurora_postgres.user}" + count = local.postgres_cluster_enabled ? 1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "aurora_postgres_master_username") + value = module.aurora_postgres.user description = "Aurora Postgres Username for the master DB user" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "aurora_postgres_master_password" { - count = "${local.postgres_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "aurora_postgres_master_password")}" - value = "${module.aurora_postgres.password}" + count = local.postgres_cluster_enabled ? 
1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "aurora_postgres_master_password") + value = module.aurora_postgres.password description = "Aurora Postgres Password for the master DB user" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "aurora_postgres_master_hostname" { - count = "${local.postgres_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "aurora_postgres_master_hostname")}" - value = "${module.aurora_postgres.master_host}" + count = local.postgres_cluster_enabled ? 1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "aurora_postgres_master_hostname") + value = module.aurora_postgres.master_host description = "Aurora Postgres DB Master hostname" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "aurora_postgres_replicas_hostname" { - count = "${local.postgres_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "aurora_postgres_replicas_hostname")}" - value = "${module.aurora_postgres.replicas_host}" + count = local.postgres_cluster_enabled ? 1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "aurora_postgres_replicas_hostname") + value = module.aurora_postgres.replicas_host description = "Aurora Postgres DB Replicas hostname" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "aurora_postgres_cluster_name" { - count = "${local.postgres_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "aurora_postgres_cluster_name")}" - value = "${module.aurora_postgres.cluster_name}" + count = local.postgres_cluster_enabled ? 1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "aurora_postgres_cluster_name") + value = module.aurora_postgres.cluster_name description = "Aurora Postgres DB Cluster Identifier" type = "String" overwrite = "true" } output "aurora_postgres_database_name" { - value = "${module.aurora_postgres.name}" + value = module.aurora_postgres.name description = "Aurora Postgres Database name" } output "aurora_postgres_master_username" { - value = "${module.aurora_postgres.user}" + value = module.aurora_postgres.user description = "Aurora Postgres Username for the master DB user" } output "aurora_postgres_master_hostname" { - value = "${module.aurora_postgres.master_host}" + value = module.aurora_postgres.master_host description = "Aurora Postgres DB Master hostname" } output "aurora_postgres_replicas_hostname" { - value = "${module.aurora_postgres.replicas_host}" + value = module.aurora_postgres.replicas_host description = "Aurora Postgres Replicas hostname" } output "aurora_postgres_cluster_name" { - value = "${module.aurora_postgres.cluster_name}" + value = module.aurora_postgres.cluster_name description = "Aurora Postgres Cluster Identifier" } diff --git a/deprecated/aws/backing-services/elasticache-redis.tf b/deprecated/aws/backing-services/elasticache-redis.tf index fd57118f2..bf129c2dc 100644 --- a/deprecated/aws/backing-services/elasticache-redis.tf +++ b/deprecated/aws/backing-services/elasticache-redis.tf @@ -1,58 +1,58 @@ variable "redis_name" { - type = "string" + type = string default = "redis" description = "Redis name" } variable "redis_instance_type" { - type = "string" + type = string default = "cache.t2.medium" description = "EC2 instance type for Redis cluster" } variable "redis_cluster_size" { - type = "string" + type = string default = "2" description = "Redis cluster size" } variable 
"redis_cluster_enabled" { - type = "string" + type = string default = "false" description = "Set to false to prevent the module from creating any resources" } variable "redis_auth_token" { - type = "string" + type = string default = "" description = "Auth token for password protecting redis, transit_encryption_enabled must be set to 'true'! Password must be longer than 16 chars" } variable "redis_transit_encryption_enabled" { - type = "string" + type = string default = "true" description = "Enable TLS" } variable "redis_params" { - type = "list" + type = list(string) default = [] description = "A list of Redis parameters to apply. Note that parameters may differ from a Redis family to another" } module "elasticache_redis" { source = "git::https://github.com/cloudposse/terraform-aws-elasticache-redis.git?ref=tags/0.7.1" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.redis_name}" - zone_id = "${local.zone_id}" + namespace = var.namespace + stage = var.stage + name = var.redis_name + zone_id = local.zone_id security_groups = ["${module.kops_metadata.nodes_security_group_id}"] - vpc_id = "${module.vpc.vpc_id}" + vpc_id = module.vpc.vpc_id subnets = ["${module.subnets.private_subnet_ids}"] maintenance_window = "sun:03:00-sun:04:00" - cluster_size = "${var.redis_cluster_size}" - instance_type = "${var.redis_instance_type}" - transit_encryption_enabled = "${var.redis_transit_encryption_enabled}" + cluster_size = var.redis_cluster_size + instance_type = var.redis_instance_type + transit_encryption_enabled = var.redis_transit_encryption_enabled engine_version = "3.2.6" family = "redis3.2" port = "6379" @@ -61,20 +61,20 @@ module "elasticache_redis" { apply_immediately = "true" availability_zones = ["${local.availability_zones}"] automatic_failover = "false" - enabled = "${var.redis_cluster_enabled}" - auth_token = "${var.redis_auth_token}" + enabled = var.redis_cluster_enabled + auth_token = var.redis_auth_token - parameter = "${var.redis_params}" + parameter = var.redis_params } output "elasticache_redis_id" { - value = "${module.elasticache_redis.id}" + value = module.elasticache_redis.id } output "elasticache_redis_security_group_id" { - value = "${module.elasticache_redis.security_group_id}" + value = module.elasticache_redis.security_group_id } output "elasticache_redis_host" { - value = "${module.elasticache_redis.host}" + value = module.elasticache_redis.host } diff --git a/deprecated/aws/backing-services/elasticsearch.tf b/deprecated/aws/backing-services/elasticsearch.tf index 24b1c7e91..9d8d5e5d5 100644 --- a/deprecated/aws/backing-services/elasticsearch.tf +++ b/deprecated/aws/backing-services/elasticsearch.tf @@ -1,18 +1,18 @@ variable "elasticsearch_name" { - type = "string" + type = string default = "elasticsearch" description = "Elasticsearch cluster name" } variable "elasticsearch_version" { - type = "string" + type = string default = "6.2" description = "Version of Elasticsearch to deploy" } # Encryption at rest is not supported with t2.small.elasticsearch instances variable "elasticsearch_encrypt_at_rest_enabled" { - type = "string" + type = string default = "false" description = "Whether to enable encryption at rest" } @@ -24,7 +24,7 @@ variable "elasticsearch_ebs_volume_size" { } variable "elasticsearch_instance_type" { - type = "string" + type = string default = "t2.small.elasticsearch" description = "Elasticsearch instance type for data nodes in the cluster" } @@ -36,19 +36,19 @@ variable "elasticsearch_instance_count" { # 
https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-ac.html variable "elasticsearch_iam_actions" { - type = "list" + type = list(string) default = ["es:ESHttpGet", "es:ESHttpPut", "es:ESHttpPost", "es:ESHttpHead", "es:Describe*", "es:List*"] description = "List of actions to allow for the IAM roles, _e.g._ `es:ESHttpGet`, `es:ESHttpPut`, `es:ESHttpPost`" } variable "elasticsearch_enabled" { - type = "string" + type = string default = "false" description = "Set to false to prevent the module from creating any resources" } variable "elasticsearch_permitted_nodes" { - type = "string" + type = string description = "Kops kubernetes nodes that are permitted to access elastic search (e.g. 'nodes', 'masters', 'both' or 'any')" default = "nodes" } @@ -71,23 +71,23 @@ locals { module "elasticsearch" { source = "git::https://github.com/cloudposse/terraform-aws-elasticsearch.git?ref=tags/0.1.5" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.elasticsearch_name}" - dns_zone_id = "${local.zone_id}" + namespace = var.namespace + stage = var.stage + name = var.elasticsearch_name + dns_zone_id = local.zone_id security_groups = ["${local.security_groups[var.elasticsearch_permitted_nodes]}"] - vpc_id = "${module.vpc.vpc_id}" + vpc_id = module.vpc.vpc_id subnet_ids = ["${slice(module.subnets.private_subnet_ids, 0, min(2, length(module.subnets.private_subnet_ids)))}"] - zone_awareness_enabled = "${length(module.subnets.private_subnet_ids) > 1 ? "true" : "false"}" - elasticsearch_version = "${var.elasticsearch_version}" - instance_type = "${var.elasticsearch_instance_type}" - instance_count = "${var.elasticsearch_instance_count}" + zone_awareness_enabled = length(module.subnets.private_subnet_ids) > 1 ? "true" : "false" + elasticsearch_version = var.elasticsearch_version + instance_type = var.elasticsearch_instance_type + instance_count = var.elasticsearch_instance_count iam_role_arns = ["${local.role_arns[var.elasticsearch_permitted_nodes]}"] iam_actions = ["${var.elasticsearch_iam_actions}"] kibana_subdomain_name = "kibana-elasticsearch" - ebs_volume_size = "${var.elasticsearch_ebs_volume_size}" - encrypt_at_rest_enabled = "${var.elasticsearch_encrypt_at_rest_enabled}" - enabled = "${var.elasticsearch_enabled}" + ebs_volume_size = var.elasticsearch_ebs_volume_size + encrypt_at_rest_enabled = var.elasticsearch_encrypt_at_rest_enabled + enabled = var.elasticsearch_enabled advanced_options = { "rest.action.multi.allow_explicit_index" = "true" @@ -95,36 +95,36 @@ module "elasticsearch" { } output "elasticsearch_security_group_id" { - value = "${module.elasticsearch.security_group_id}" + value = module.elasticsearch.security_group_id description = "Security Group ID to control access to the Elasticsearch domain" } output "elasticsearch_domain_arn" { - value = "${module.elasticsearch.domain_arn}" + value = module.elasticsearch.domain_arn description = "ARN of the Elasticsearch domain" } output "elasticsearch_domain_id" { - value = "${module.elasticsearch.domain_id}" + value = module.elasticsearch.domain_id description = "Unique identifier for the Elasticsearch domain" } output "elasticsearch_domain_endpoint" { - value = "${module.elasticsearch.domain_endpoint}" + value = module.elasticsearch.domain_endpoint description = "Domain-specific endpoint used to submit index, search, and data upload requests" } output "elasticsearch_kibana_endpoint" { - value = "${module.elasticsearch.kibana_endpoint}" + value = module.elasticsearch.kibana_endpoint description = 
"Domain-specific endpoint for Kibana without https scheme" } output "elasticsearch_domain_hostname" { - value = "${module.elasticsearch.domain_hostname}" + value = module.elasticsearch.domain_hostname description = "Elasticsearch domain hostname to submit index, search, and data upload requests" } output "elasticsearch_kibana_hostname" { - value = "${module.elasticsearch.kibana_hostname}" + value = module.elasticsearch.kibana_hostname description = "Kibana hostname" } diff --git a/deprecated/aws/backing-services/flow-logs.tf b/deprecated/aws/backing-services/flow-logs.tf index a812b0b5c..978c1391a 100644 --- a/deprecated/aws/backing-services/flow-logs.tf +++ b/deprecated/aws/backing-services/flow-logs.tf @@ -1,64 +1,64 @@ variable "flow_logs_enabled" { - type = "string" + type = string default = "true" } module "flow_logs" { source = "git::https://github.com/cloudposse/terraform-aws-vpc-flow-logs-s3-bucket.git?ref=tags/0.1.0" - name = "${local.name}" - namespace = "${var.namespace}" - stage = "${var.stage}" - attributes = "${list("flow-logs")}" + name = local.name + namespace = var.namespace + stage = var.stage + attributes = list("flow-logs") - region = "${var.region}" + region = var.region - enabled = "${var.flow_logs_enabled}" + enabled = var.flow_logs_enabled - vpc_id = "${module.vpc.vpc_id}" + vpc_id = module.vpc.vpc_id } output "flow_logs_kms_key_arn" { - value = "${module.flow_logs.kms_key_arn}" + value = module.flow_logs.kms_key_arn description = "Flow logs KMS Key ARN" } output "flow_logs_kms_key_id" { - value = "${module.flow_logs.kms_key_id}" + value = module.flow_logs.kms_key_id description = "Flow logs KMS Key ID" } output "flow_logs_kms_alias_arn" { - value = "${module.flow_logs.kms_alias_arn}" + value = module.flow_logs.kms_alias_arn description = "Flow logs KMS Alias ARN" } output "flow_logs_kms_alias_name" { - value = "${module.flow_logs.kms_alias_name}" + value = module.flow_logs.kms_alias_name description = "Flow logs KMS Alias name" } output "flow_logs_bucket_domain_name" { - value = "${module.flow_logs.bucket_domain_name}" + value = module.flow_logs.bucket_domain_name description = "Flow logs FQDN of bucket" } output "flow_logs_bucket_id" { - value = "${module.flow_logs.bucket_id}" + value = module.flow_logs.bucket_id description = "Flow logs bucket Name (aka ID)" } output "flow_logs_bucket_arn" { - value = "${module.flow_logs.bucket_arn}" + value = module.flow_logs.bucket_arn description = "Flow logs bucket ARN" } output "flow_logs_bucket_prefix" { - value = "${module.flow_logs.bucket_prefix}" + value = module.flow_logs.bucket_prefix description = "Flow logs bucket prefix configured for lifecycle rules" } output "flow_logs_id" { - value = "${module.flow_logs.id}" + value = module.flow_logs.id description = "Flow logs ID" } diff --git a/deprecated/aws/backing-services/kops-metadata.tf b/deprecated/aws/backing-services/kops-metadata.tf index ea764cfb6..9433f8a5f 100644 --- a/deprecated/aws/backing-services/kops-metadata.tf +++ b/deprecated/aws/backing-services/kops-metadata.tf @@ -1,11 +1,11 @@ variable "kops_metadata_enabled" { description = "Set to false to prevent the module from creating any resources" - type = "string" + type = string default = "false" } module "kops_metadata" { source = "git::https://github.com/cloudposse/terraform-aws-kops-metadata.git?ref=tags/0.2.0" dns_zone = "${var.region}.${var.zone_name}" - enabled = "${var.kops_metadata_enabled}" + enabled = var.kops_metadata_enabled } diff --git a/deprecated/aws/backing-services/main.tf 
b/deprecated/aws/backing-services/main.tf index daf7ce7dc..abeece420 100644 --- a/deprecated/aws/backing-services/main.tf +++ b/deprecated/aws/backing-services/main.tf @@ -5,50 +5,50 @@ terraform { } variable "aws_assume_role_arn" { - type = "string" + type = string } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `eg` or `cp`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } variable "region" { - type = "string" + type = string description = "AWS region" } variable "availability_zones" { - type = "list" + type = list(string) description = "AWS region availability zones to use (e.g.: ['us-west-2a', 'us-west-2b']). If empty will use all available zones" default = [] } variable "zone_name" { - type = "string" + type = string description = "DNS zone name" } data "aws_availability_zones" "available" {} data "aws_route53_zone" "default" { - name = "${var.zone_name}" + name = var.zone_name } locals { null = "" - zone_id = "${data.aws_route53_zone.default.zone_id}" + zone_id = data.aws_route53_zone.default.zone_id availability_zones = ["${split(",", length(var.availability_zones) == 0 ? join(",", data.aws_availability_zones.available.names) : join(",", var.availability_zones))}"] - chamber_service = "${var.chamber_service == "" ? basename(pathexpand(path.module)) : var.chamber_service}" + chamber_service = var.chamber_service == "" ? basename(pathexpand(path.module)) : var.chamber_service } provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } diff --git a/deprecated/aws/backing-services/rds-replica.tf b/deprecated/aws/backing-services/rds-replica.tf index 6bcf66db4..00dd10f1f 100644 --- a/deprecated/aws/backing-services/rds-replica.tf +++ b/deprecated/aws/backing-services/rds-replica.tf @@ -1,23 +1,23 @@ variable "rds_replica_name" { - type = "string" + type = string default = "rds-replica" description = "RDS instance name" } variable "rds_replica_enabled" { - type = "string" + type = string default = "false" description = "Set to false to prevent the module from creating any resources" } variable "rds_replica_replicate_source_db" { - type = "string" + type = string description = "Specifies that this resource is a Replicate database, and to use this value as the source database. This correlates to the identifier of another Amazon RDS Database to replicate. Note that if you are creating a cross-region replica of an encrypted database you will also need to specify a `kms_key_id`." default = "changeme" } variable "rds_replica_kms_key_id" { - type = "string" + type = string description = "The ARN for the KMS encryption key. If creating an encrypted replica, set this to the destination KMS ARN." 
default = "" } @@ -25,155 +25,155 @@ variable "rds_replica_kms_key_id" { # db.t2.micro is free tier # https://aws.amazon.com/rds/free variable "rds_replica_instance_type" { - type = "string" + type = string default = "db.t2.micro" description = "EC2 instance type for RDS DB" } variable "rds_replica_port" { - type = "string" + type = string default = "3306" description = "RDS DB port" } variable "rds_replica_snapshot" { - type = "string" + type = string default = "" description = "Set to a snapshot ID to restore from snapshot" } variable "rds_replica_multi_az" { - type = "string" + type = string default = "false" - description = "Run instaces in multiple az" + description = "Run instances in multiple az" } variable "rds_replica_storage_type" { - type = "string" + type = string default = "gp2" description = "Storage type" } variable "rds_replica_storage_size" { - type = "string" + type = string default = "20" description = "Storage size in Gb" } variable "rds_replica_storage_encrypted" { - type = "string" + type = string default = "true" description = "Set to true to encrypt storage" } variable "rds_replica_auto_minor_version_upgrade" { - type = "string" + type = string default = "true" description = "Allow automated minor version upgrade (e.g. from Postgres 9.5.3 to Postgres 9.5.4)" } variable "rds_replica_allow_major_version_upgrade" { - type = "string" + type = string default = "false" description = "Allow major version upgrade" } variable "rds_replica_apply_immediately" { - type = "string" + type = string default = "true" description = "Specifies whether any database modifications are applied immediately, or during the next maintenance window" } variable "rds_replica_skip_final_snapshot" { - type = "string" + type = string default = "false" description = "If true (default), no snapshot will be made before deleting DB" } variable "rds_replica_backup_retention_period" { - type = "string" + type = string default = "7" description = "Backup retention period in days. 
Must be > 0 to enable backups" } variable "rds_replica_backup_window" { - type = "string" + type = string default = "22:00-03:00" description = "When AWS can perform DB snapshots, can't overlap with maintenance window" } locals { - rds_replica_enabled = "${var.rds_replica_enabled == "true"}" + rds_replica_enabled = var.rds_replica_enabled == "true" } module "rds_replica" { source = "git::https://github.com/cloudposse/terraform-aws-rds-replica.git?ref=tags/0.1.0" - enabled = "${var.rds_replica_enabled}" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.rds_replica_name}" - kms_key_id = "${var.rds_replica_kms_key_id}" - replicate_source_db = "${var.rds_replica_replicate_source_db}" - dns_zone_id = "${local.zone_id}" - host_name = "${var.rds_replica_name}" + enabled = var.rds_replica_enabled + namespace = var.namespace + stage = var.stage + name = var.rds_replica_name + kms_key_id = var.rds_replica_kms_key_id + replicate_source_db = var.rds_replica_replicate_source_db + dns_zone_id = local.zone_id + host_name = var.rds_replica_name security_group_ids = ["${module.kops_metadata.nodes_security_group_id}"] - database_port = "${var.rds_replica_port}" - multi_az = "${var.rds_replica_multi_az}" - storage_type = "${var.rds_replica_storage_type}" - storage_encrypted = "${var.rds_replica_storage_encrypted}" - instance_class = "${var.rds_replica_instance_type}" + database_port = var.rds_replica_port + multi_az = var.rds_replica_multi_az + storage_type = var.rds_replica_storage_type + storage_encrypted = var.rds_replica_storage_encrypted + instance_class = var.rds_replica_instance_type publicly_accessible = "false" subnet_ids = ["${module.subnets.private_subnet_ids}"] - vpc_id = "${module.vpc.vpc_id}" - snapshot_identifier = "${var.rds_replica_snapshot}" - auto_minor_version_upgrade = "${var.rds_replica_auto_minor_version_upgrade}" - allow_major_version_upgrade = "${var.rds_replica_allow_major_version_upgrade}" - apply_immediately = "${var.rds_replica_apply_immediately}" - skip_final_snapshot = "${var.rds_replica_skip_final_snapshot}" + vpc_id = module.vpc.vpc_id + snapshot_identifier = var.rds_replica_snapshot + auto_minor_version_upgrade = var.rds_replica_auto_minor_version_upgrade + allow_major_version_upgrade = var.rds_replica_allow_major_version_upgrade + apply_immediately = var.rds_replica_apply_immediately + skip_final_snapshot = var.rds_replica_skip_final_snapshot copy_tags_to_snapshot = "true" - backup_retention_period = "${var.rds_replica_backup_retention_period}" - backup_window = "${var.rds_replica_backup_window}" + backup_retention_period = var.rds_replica_backup_retention_period + backup_window = var.rds_replica_backup_window } resource "aws_ssm_parameter" "rds_replica_hostname" { - count = "${local.rds_replica_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "rds_replica_hostname")}" - value = "${module.rds_replica.hostname}" + count = local.rds_replica_enabled ? 1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "rds_replica_hostname") + value = module.rds_replica.hostname description = "RDS replica hostname" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "rds_replica_port" { - count = "${local.rds_replica_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "rds_replica_port")}" - value = "${var.rds_replica_port}" + count = local.rds_replica_enabled ? 
1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "rds_replica_port") + value = var.rds_replica_port description = "RDS replica port" type = "String" overwrite = "true" } output "rds_replica_instance_id" { - value = "${module.rds_replica.instance_id}" + value = module.rds_replica.instance_id description = "RDS replica ID of the instance" } output "rds_replica_instance_address" { - value = "${module.rds_replica.instance_address}" + value = module.rds_replica.instance_address description = "RDS replica address of the instance" } output "rds_replica_instance_endpoint" { - value = "${module.rds_replica.instance_endpoint}" + value = module.rds_replica.instance_endpoint description = "RDS replica DNS Endpoint of the instance" } output "rds_replica_port" { - value = "${local.rds_replica_enabled ? var.rds_replica_port : local.null}" + value = local.rds_replica_enabled ? var.rds_replica_port : local.null description = "RDS replica port" } output "rds_replica_hostname" { - value = "${module.rds_replica.hostname}" + value = module.rds_replica.hostname description = "RDS replica host name of the instance" } diff --git a/deprecated/aws/backing-services/rds.tf b/deprecated/aws/backing-services/rds.tf index 24f9883f0..a65f13375 100644 --- a/deprecated/aws/backing-services/rds.tf +++ b/deprecated/aws/backing-services/rds.tf @@ -1,11 +1,11 @@ variable "rds_name" { - type = "string" + type = string default = "rds" description = "RDS instance name" } variable "rds_enabled" { - type = "string" + type = string default = "false" description = "Set to false to prevent the module from creating any resources" } @@ -13,7 +13,7 @@ variable "rds_enabled" { # Don't use `root` # ("MasterUsername root cannot be used as it is a reserved word used by the engine") variable "rds_admin_user" { - type = "string" + type = string description = "RDS DB admin user name" default = "" } @@ -21,7 +21,7 @@ variable "rds_admin_user" { # Must be longer than 8 chars # ("The parameter MasterUserPassword is not a valid password because it is shorter than 8 characters") variable "rds_admin_password" { - type = "string" + type = string description = "RDS DB password for the admin user" default = "" } @@ -29,7 +29,7 @@ variable "rds_admin_password" { # Don't use `default` # ("DatabaseName default cannot be used as it is a reserved word used by the engine") variable "rds_db_name" { - type = "string" + type = string description = "RDS DB database name" default = "" } @@ -37,248 +37,248 @@ variable "rds_db_name" { # db.t2.micro is free tier # https://aws.amazon.com/rds/free variable "rds_instance_type" { - type = "string" + type = string default = "db.t2.micro" description = "EC2 instance type for RDS DB" } variable "rds_engine" { - type = "string" + type = string default = "mysql" description = "RDS DB engine" } variable "rds_engine_version" { - type = "string" + type = string default = "5.6" description = "RDS DB engine version" } variable "rds_port" { - type = "string" + type = string default = "3306" description = "RDS DB port" } variable "rds_db_parameter_group" { - type = "string" + type = string default = "mysql5.6" description = "RDS DB engine version" } variable "rds_snapshot" { - type = "string" + type = string default = "" description = "Set to a snapshot ID to restore from snapshot" } variable "rds_parameter_group_name" { - type = "string" + type = string default = "" description = "Existing parameter group name to use" } variable "rds_multi_az" { - type = "string" + type = string default = "false" - 
description = "Run instaces in multiple az" + description = "Run instances in multiple az" } variable "rds_storage_type" { - type = "string" + type = string default = "gp2" description = "Storage type" } variable "rds_storage_size" { - type = "string" + type = string default = "20" description = "Storage size" } variable "rds_storage_encrypted" { - type = "string" + type = string default = "true" description = "Set true to encrypt storage" } variable "rds_auto_minor_version_upgrade" { - type = "string" + type = string default = "false" description = "Allow automated minor version upgrade (e.g. from Postgres 9.5.3 to Postgres 9.5.4)" } variable "rds_allow_major_version_upgrade" { - type = "string" + type = string default = "false" description = "Allow major version upgrade" } variable "rds_apply_immediately" { - type = "string" + type = string default = "true" description = "Specifies whether any database modifications are applied immediately, or during the next maintenance window" } variable "rds_skip_final_snapshot" { - type = "string" + type = string default = "false" description = "If true (default), no snapshot will be made before deleting DB" } variable "rds_backup_retention_period" { - type = "string" + type = string default = "7" description = "Backup retention period in days. Must be > 0 to enable backups" } variable "rds_backup_window" { - type = "string" + type = string default = "22:00-03:00" description = "When AWS can perform DB snapshots, can't overlap with maintenance window" } resource "random_pet" "rds_db_name" { - count = "${local.rds_enabled ? 1 : 0}" + count = local.rds_enabled ? 1 : 0 separator = "_" } resource "random_string" "rds_admin_user" { - count = "${local.rds_enabled ? 1 : 0}" + count = local.rds_enabled ? 1 : 0 length = 8 special = false number = false } resource "random_string" "rds_admin_password" { - count = "${local.rds_enabled ? 1 : 0}" + count = local.rds_enabled ? 1 : 0 length = 16 special = true } locals { - rds_enabled = "${var.rds_enabled == "true"}" - rds_admin_user = "${length(var.rds_admin_user) > 0 ? var.rds_admin_user : join("", random_string.rds_admin_user.*.result)}" - rds_admin_password = "${length(var.rds_admin_password) > 0 ? var.rds_admin_password : join("", random_string.rds_admin_password.*.result)}" - rds_db_name = "${join("", random_pet.rds_db_name.*.id)}" + rds_enabled = var.rds_enabled == "true" + rds_admin_user = length(var.rds_admin_user) > 0 ? var.rds_admin_user : join("", random_string.rds_admin_user.*.result) + rds_admin_password = length(var.rds_admin_password) > 0 ? 
var.rds_admin_password : join("", random_string.rds_admin_password.*.result) + rds_db_name = join("", random_pet.rds_db_name.*.id) } module "rds" { source = "git::https://github.com/cloudposse/terraform-aws-rds.git?ref=tags/0.4.4" - enabled = "${var.rds_enabled}" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.rds_name}" - dns_zone_id = "${local.zone_id}" - host_name = "${var.rds_name}" + enabled = var.rds_enabled + namespace = var.namespace + stage = var.stage + name = var.rds_name + dns_zone_id = local.zone_id + host_name = var.rds_name security_group_ids = ["${module.kops_metadata.nodes_security_group_id}"] - database_name = "${local.rds_db_name}" - database_user = "${local.rds_admin_user}" - database_password = "${local.rds_admin_password}" - database_port = "${var.rds_port}" - multi_az = "${var.rds_multi_az}" - storage_type = "${var.rds_storage_type}" - allocated_storage = "${var.rds_storage_size}" - storage_encrypted = "${var.rds_storage_encrypted}" - engine = "${var.rds_engine}" - engine_version = "${var.rds_engine_version}" - instance_class = "${var.rds_instance_type}" - db_parameter_group = "${var.rds_db_parameter_group}" - parameter_group_name = "${var.rds_parameter_group_name}" + database_name = local.rds_db_name + database_user = local.rds_admin_user + database_password = local.rds_admin_password + database_port = var.rds_port + multi_az = var.rds_multi_az + storage_type = var.rds_storage_type + allocated_storage = var.rds_storage_size + storage_encrypted = var.rds_storage_encrypted + engine = var.rds_engine + engine_version = var.rds_engine_version + instance_class = var.rds_instance_type + db_parameter_group = var.rds_db_parameter_group + parameter_group_name = var.rds_parameter_group_name publicly_accessible = "false" subnet_ids = ["${module.subnets.private_subnet_ids}"] - vpc_id = "${module.vpc.vpc_id}" - snapshot_identifier = "${var.rds_snapshot}" - auto_minor_version_upgrade = "${var.rds_auto_minor_version_upgrade}" - allow_major_version_upgrade = "${var.rds_allow_major_version_upgrade}" - apply_immediately = "${var.rds_apply_immediately}" - skip_final_snapshot = "${var.rds_skip_final_snapshot}" + vpc_id = module.vpc.vpc_id + snapshot_identifier = var.rds_snapshot + auto_minor_version_upgrade = var.rds_auto_minor_version_upgrade + allow_major_version_upgrade = var.rds_allow_major_version_upgrade + apply_immediately = var.rds_apply_immediately + skip_final_snapshot = var.rds_skip_final_snapshot copy_tags_to_snapshot = "true" - backup_retention_period = "${var.rds_backup_retention_period}" - backup_window = "${var.rds_backup_window}" + backup_retention_period = var.rds_backup_retention_period + backup_window = var.rds_backup_window } resource "aws_ssm_parameter" "rds_db_name" { - count = "${local.rds_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "rds_db_name")}" - value = "${local.rds_db_name}" + count = local.rds_enabled ? 1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "rds_db_name") + value = local.rds_db_name description = "RDS Database Name" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "rds_admin_username" { - count = "${local.rds_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "rds_admin_username")}" - value = "${local.rds_admin_user}" + count = local.rds_enabled ? 
1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "rds_admin_username") + value = local.rds_admin_user description = "RDS Username for the admin DB user" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "rds_admin_password" { - count = "${local.rds_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "rds_admin_password")}" - value = "${local.rds_admin_password}" + count = local.rds_enabled ? 1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "rds_admin_password") + value = local.rds_admin_password description = "RDS Password for the admin DB user" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "rds_hostname" { - count = "${local.rds_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "rds_hostname")}" - value = "${module.rds.hostname}" + count = local.rds_enabled ? 1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "rds_hostname") + value = module.rds.hostname description = "RDS hostname" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "rds_port" { - count = "${local.rds_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "rds_port")}" - value = "${var.rds_port}" + count = local.rds_enabled ? 1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, "rds_port") + value = var.rds_port description = "RDS port" type = "String" overwrite = "true" } output "rds_instance_id" { - value = "${module.rds.instance_id}" + value = module.rds.instance_id description = "RDS ID of the instance" } output "rds_instance_address" { - value = "${module.rds.instance_address}" + value = module.rds.instance_address description = "RDS address of the instance" } output "rds_instance_endpoint" { - value = "${module.rds.instance_endpoint}" + value = module.rds.instance_endpoint description = "RDS DNS Endpoint of the instance" } output "rds_port" { - value = "${local.rds_enabled ? var.rds_port : local.null}" + value = local.rds_enabled ? var.rds_port : local.null description = "RDS port" } output "rds_db_name" { - value = "${local.rds_enabled ? local.rds_db_name : local.null}" + value = local.rds_enabled ? local.rds_db_name : local.null description = "RDS db name" } output "rds_admin_user" { - value = "${local.rds_enabled ? local.rds_admin_user : local.null}" + value = local.rds_enabled ? local.rds_admin_user : local.null description = "RDS admin user name" } output "rds_admin_password" { - value = "${local.rds_enabled ? local.rds_admin_password : local.null}" + value = local.rds_enabled ? 
local.rds_admin_password : local.null description = "RDS admin password" } output "rds_hostname" { - value = "${module.rds.hostname}" + value = module.rds.hostname description = "RDS host name of the instance" } diff --git a/deprecated/aws/backing-services/vpc.tf b/deprecated/aws/backing-services/vpc.tf index ad2f45ef2..2d0ab1e44 100644 --- a/deprecated/aws/backing-services/vpc.tf +++ b/deprecated/aws/backing-services/vpc.tf @@ -19,29 +19,29 @@ data "aws_region" "current" {} module "vpc" { source = "git::https://github.com/cloudposse/terraform-aws-vpc.git?ref=tags/0.4.2" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${local.name}" - cidr_block = "${var.vpc_cidr_block}" + namespace = var.namespace + stage = var.stage + name = local.name + cidr_block = var.vpc_cidr_block } module "subnets" { source = "git::https://github.com/cloudposse/terraform-aws-dynamic-subnets.git?ref=tags/0.8.0" availability_zones = ["${local.availability_zones}"] - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${local.name}" - region = "${var.region}" - vpc_id = "${module.vpc.vpc_id}" - igw_id = "${module.vpc.igw_id}" - cidr_block = "${module.vpc.vpc_cidr_block}" - nat_gateway_enabled = "${var.vpc_nat_gateway_enabled}" - max_subnet_count = "${var.vpc_max_subnet_count}" + namespace = var.namespace + stage = var.stage + name = local.name + region = var.region + vpc_id = module.vpc.vpc_id + igw_id = module.vpc.igw_id + cidr_block = module.vpc.vpc_cidr_block + nat_gateway_enabled = var.vpc_nat_gateway_enabled + max_subnet_count = var.vpc_max_subnet_count } output "vpc_id" { description = "VPC ID of backing services" - value = "${module.vpc.vpc_id}" + value = module.vpc.vpc_id } output "public_subnet_ids" { @@ -56,5 +56,5 @@ output "private_subnet_ids" { output "region" { description = "AWS region of backing services" - value = "${data.aws_region.current.name}" + value = data.aws_region.current.name } diff --git a/deprecated/aws/bootstrap/README.md b/deprecated/aws/bootstrap/README.md index 913398ceb..f53ae9f5b 100644 --- a/deprecated/aws/bootstrap/README.md +++ b/deprecated/aws/bootstrap/README.md @@ -1,6 +1,6 @@ # bootstrap -This module provisions an AWS user along with a bootstrap role suitable for bootstrapping an AWS multi-account architecture as found in our [reference architectures](https://github.com/cloudposse/reference-architecutres). +This module provisions an AWS user along with a bootstrap role suitable for bootstrapping an AWS multi-account architecture as found in our [reference architectures](https://github.com/cloudposse/reference-architecutres). These user and role are intended to be used as a **temporary fixture** and should be deprovisioned after all accounts have been provisioned in order to maintain a secure environment. 
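The bootstrap hunks that follow apply the same Terraform 0.12 (HCL2) upgrade used throughout this patch: quoted interpolations become bare expressions, and quoted type names become first-class type keywords. A minimal before/after sketch of that pattern, using the aws_assume_role_arn variable and provider block that appear in deprecated/aws/bootstrap below (illustrative only, not an additional change in this patch):

# Terraform 0.11 (HCL1) form, as removed by this patch:
#   variable "aws_assume_role_arn" {
#     type = "string"
#   }
#   provider "aws" {
#     assume_role {
#       role_arn = "${var.aws_assume_role_arn}"
#     }
#   }

# Terraform 0.12 (HCL2) form, as added by this patch:
variable "aws_assume_role_arn" {
  type = string # primitive type keyword, no quotes
}

provider "aws" {
  assume_role {
    role_arn = var.aws_assume_role_arn # bare expression; the "${...}" wrapper is no longer needed
  }
}

Collection types are tightened the same way: type = "list" becomes type = list(string) and type = "map" becomes type = map(string), matching how the attributes, tags and parameters variables are updated in the later hunks of this patch.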
diff --git a/deprecated/aws/bootstrap/main.tf b/deprecated/aws/bootstrap/main.tf index 042fea3f5..04cb3fe9e 100644 --- a/deprecated/aws/bootstrap/main.tf +++ b/deprecated/aws/bootstrap/main.tf @@ -6,7 +6,7 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } @@ -14,8 +14,8 @@ provider "aws" { module "user" { source = "git::https://github.com/cloudposse/terraform-aws-iam-system-user.git?ref=tags/0.3.2" - namespace = "${var.namespace}" - stage = "${var.stage}" + namespace = var.namespace + stage = var.stage name = "bootstrap" } @@ -36,18 +36,18 @@ data "aws_iam_policy_document" "assume_role" { # Fetch the OrganizationAccountAccessRole ARNs from SSM module "organization_account_access_role_arns" { source = "git::https://github.com/cloudposse/terraform-aws-ssm-parameter-store?ref=tags/0.1.5" - parameter_read = "${formatlist("/${var.namespace}/%s/organization_account_access_role", var.accounts_enabled)}" + parameter_read = formatlist("/${var.namespace}/%s/organization_account_access_role", var.accounts_enabled) } # IAM role for bootstrapping; allow user to assume it resource "aws_iam_role" "bootstrap" { - name = "${module.user.user_name}" - assume_role_policy = "${data.aws_iam_policy_document.assume_role.json}" + name = module.user.user_name + assume_role_policy = data.aws_iam_policy_document.assume_role.json } # Grant Administrator Access to the current "root" account to the role resource "aws_iam_role_policy_attachment" "administrator_access" { - role = "${aws_iam_role.bootstrap.name}" + role = aws_iam_role.bootstrap.name policy_arn = "arn:aws:iam::aws:policy/AdministratorAccess" } @@ -65,83 +65,83 @@ data "aws_iam_policy_document" "organization_account_access_role" { # Create an IAM policy from the generated document resource "aws_iam_policy" "organization_account_access_role" { - name = "${aws_iam_role.bootstrap.name}" - policy = "${data.aws_iam_policy_document.organization_account_access_role.json}" + name = aws_iam_role.bootstrap.name + policy = data.aws_iam_policy_document.organization_account_access_role.json } # Assign the policy to the user resource "aws_iam_user_policy_attachment" "organization_account_access_role" { - user = "${aws_iam_role.bootstrap.name}" - policy_arn = "${aws_iam_policy.organization_account_access_role.arn}" + user = aws_iam_role.bootstrap.name + policy_arn = aws_iam_policy.organization_account_access_role.arn } # Render the env file with IAM credentials data "template_file" "env" { - template = "${file("${path.module}/env.tpl")}" + template = file("${path.module}/env.tpl") vars { - aws_access_key_id = "${module.user.access_key_id}" - aws_secret_access_key = "${module.user.secret_access_key}" - aws_assume_role_arn = "${aws_iam_role.bootstrap.arn}" - aws_data_path = "${dirname(local_file.config_file.filename)}" - aws_config_file = "${local_file.config_file.filename}" + aws_access_key_id = module.user.access_key_id + aws_secret_access_key = module.user.secret_access_key + aws_assume_role_arn = aws_iam_role.bootstrap.arn + aws_data_path = dirname(local_file.config_file.filename) + aws_config_file = local_file.config_file.filename } } # Write the env file to disk resource "local_file" "env_file" { - content = "${data.template_file.env.rendered}" + content = data.template_file.env.rendered filename = "${var.output_path}/${var.env_file}" } # Render the credentials file with IAM credentials data "template_file" "credentials" { - template = "${file("${path.module}/credentials.tpl")}" + template = 
file("${path.module}/credentials.tpl") vars { - source_profile_name = "${var.namespace}" - aws_access_key_id = "${module.user.access_key_id}" - aws_secret_access_key = "${module.user.secret_access_key}" - aws_assume_role_arn = "${aws_iam_role.bootstrap.arn}" + source_profile_name = var.namespace + aws_access_key_id = module.user.access_key_id + aws_secret_access_key = module.user.secret_access_key + aws_assume_role_arn = aws_iam_role.bootstrap.arn } } # Write the credentials file to disk resource "local_file" "credentials_file" { - content = "${data.template_file.credentials.rendered}" + content = data.template_file.credentials.rendered filename = "${var.output_path}/${var.credentials_file}" } # Render the config file with IAM credentials data "template_file" "config_root" { - template = "${file("${path.module}/config.tpl")}" + template = file("${path.module}/config.tpl") vars { profile_name = "${var.namespace}-${var.stage}-admin" - source_profile = "${var.namespace}" - region = "${var.aws_region}" - role_arn = "${aws_iam_role.bootstrap.arn}" + source_profile = var.namespace + region = var.aws_region + role_arn = aws_iam_role.bootstrap.arn } } # Render the config file with IAM credentials data "template_file" "config" { - count = "${length(module.organization_account_access_role_arns.values)}" - template = "${file("${path.module}/config.tpl")}" + count = length(module.organization_account_access_role_arns.values) + template = file("${path.module}/config.tpl") vars { profile_name = "${var.namespace}-${var.accounts_enabled[count.index]}-admin" - source_profile = "${var.namespace}" - region = "${var.aws_region}" - role_arn = "${module.organization_account_access_role_arns.values[count.index]}" + source_profile = var.namespace + region = var.aws_region + role_arn = module.organization_account_access_role_arns.values[count.index] } } # Write the config file to disk resource "local_file" "config_file" { - content = "${join("\n\n", + content = (join("\n\n", concat(list("[profile ${var.namespace}]"), - list(data.template_file.config_root.rendered), data.template_file.config.*.rendered))}" + list(data.template_file.config_root.rendered), data.template_file.config.*.rendered))) filename = "${var.output_path}/${var.config_file}" } diff --git a/deprecated/aws/bootstrap/outputs.tf b/deprecated/aws/bootstrap/outputs.tf index 972a349f8..f92e33a34 100644 --- a/deprecated/aws/bootstrap/outputs.tf +++ b/deprecated/aws/bootstrap/outputs.tf @@ -1,4 +1,4 @@ output "env_file" { description = "Env file with IAM bootstrap credentials" - value = "${var.env_file}" + value = var.env_file } diff --git a/deprecated/aws/bootstrap/variables.tf b/deprecated/aws/bootstrap/variables.tf index cbbe78083..4024c9fa2 100644 --- a/deprecated/aws/bootstrap/variables.tf +++ b/deprecated/aws/bootstrap/variables.tf @@ -1,47 +1,47 @@ variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. 
`prod`, `dev`, `staging`)" } variable "aws_assume_role_arn" { - type = "string" + type = string } variable "aws_region" { - type = "string" + type = string } variable "accounts_enabled" { - type = "list" + type = list(string) description = "Accounts to enable" default = ["dev", "staging", "prod", "testing", "audit"] } variable "output_path" { - type = "string" + type = string default = "./" description = "Base directory where files will be written" } variable "env_file" { - type = "string" + type = string description = "File to write the temporary bootstrap environment variable settings" default = ".envrc" } variable "config_file" { - type = "string" + type = string description = "File to write the temporary bootstrap AWS config" default = ".aws/config" } variable "credentials_file" { - type = "string" + type = string description = "File to write the temporary bootstrap AWS credentials" default = ".aws/credentials" } diff --git a/deprecated/aws/chamber/kms-key.tf b/deprecated/aws/chamber/kms-key.tf index e8fd50e00..dccf48af2 100644 --- a/deprecated/aws/chamber/kms-key.tf +++ b/deprecated/aws/chamber/kms-key.tf @@ -1,27 +1,27 @@ module "chamber_kms_key" { source = "git::https://github.com/cloudposse/terraform-aws-kms-key.git?ref=tags/0.1.0" - namespace = "${var.namespace}" - stage = "${var.stage}" + namespace = var.namespace + stage = var.stage name = "chamber" description = "KMS key for chamber" } output "chamber_kms_key_arn" { - value = "${module.chamber_kms_key.key_arn}" + value = module.chamber_kms_key.key_arn description = "KMS key ARN" } output "chamber_kms_key_id" { - value = "${module.chamber_kms_key.key_id}" + value = module.chamber_kms_key.key_id description = "KMS key ID" } output "chamber_kms_key_alias_arn" { - value = "${module.chamber_kms_key.alias_arn}" + value = module.chamber_kms_key.alias_arn description = "KMS key alias ARN" } output "chamber_kms_key_alias_name" { - value = "${module.chamber_kms_key.alias_name}" + value = module.chamber_kms_key.alias_name description = "KMS key alias name" } diff --git a/deprecated/aws/chamber/main.tf b/deprecated/aws/chamber/main.tf index 315ed0d61..48f8edf6e 100644 --- a/deprecated/aws/chamber/main.tf +++ b/deprecated/aws/chamber/main.tf @@ -5,27 +5,27 @@ terraform { } variable "aws_assume_role_arn" { - type = "string" + type = string } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. 
`prod`, `dev`, `staging`)" } variable "parameter_groups" { - type = "list" + type = list(string) description = "Parameter group names" default = ["kops", "app"] } provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } diff --git a/deprecated/aws/chamber/s3-bucket.tf b/deprecated/aws/chamber/s3-bucket.tf index f6136a358..81a0d999f 100644 --- a/deprecated/aws/chamber/s3-bucket.tf +++ b/deprecated/aws/chamber/s3-bucket.tf @@ -10,54 +10,54 @@ variable "s3_user_enabled" { module "s3_bucket" { source = "git::https://github.com/cloudposse/terraform-aws-s3-bucket.git?ref=tags/0.3.0" - namespace = "${var.namespace}" - stage = "${var.stage}" + namespace = var.namespace + stage = var.stage name = "chamber" - enabled = "${var.s3_enabled}" + enabled = var.s3_enabled versioning_enabled = "false" - user_enabled = "${var.s3_user_enabled}" + user_enabled = var.s3_user_enabled sse_algorithm = "AES256" allow_encrypted_uploads_only = "true" } output "bucket_domain_name" { - value = "${module.s3_bucket.bucket_domain_name}" + value = module.s3_bucket.bucket_domain_name description = "FQDN of bucket" } output "bucket_id" { - value = "${module.s3_bucket.bucket_arn}" + value = module.s3_bucket.bucket_arn description = "Bucket Name (aka ID)" } output "bucket_arn" { - value = "${module.s3_bucket.bucket_arn}" + value = module.s3_bucket.bucket_arn description = "Bucket ARN" } output "user_name" { - value = "${module.s3_bucket.user_name}" + value = module.s3_bucket.user_name description = "Normalized IAM user name" } output "user_arn" { - value = "${module.s3_bucket.user_arn}" + value = module.s3_bucket.user_arn description = "The ARN assigned by AWS for the user" } output "user_unique_id" { - value = "${module.s3_bucket.user_unique_id}" + value = module.s3_bucket.user_unique_id description = "The user unique ID assigned by AWS" } output "access_key_id" { sensitive = true - value = "${module.s3_bucket.access_key_id}" + value = module.s3_bucket.access_key_id description = "The access key ID" } output "secret_access_key" { sensitive = true - value = "${module.s3_bucket.secret_access_key}" + value = module.s3_bucket.secret_access_key description = "The secret access key. 
This will be written to the state file in plain-text" } diff --git a/deprecated/aws/chamber/user.tf b/deprecated/aws/chamber/user.tf index 6f675dcdb..839d4192a 100644 --- a/deprecated/aws/chamber/user.tf +++ b/deprecated/aws/chamber/user.tf @@ -10,12 +10,12 @@ variable "chamber_user_enabled" { # https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-access.html module "chamber_user" { source = "git::https://github.com/cloudposse/terraform-aws-iam-chamber-user.git?ref=tags/0.1.7" - namespace = "${var.namespace}" - stage = "${var.stage}" + namespace = var.namespace + stage = var.stage name = "chamber" - enabled = "${var.chamber_user_enabled}" + enabled = var.chamber_user_enabled attributes = ["codefresh"] - kms_key_arn = "${module.chamber_kms_key.key_arn}" + kms_key_arn = module.chamber_kms_key.key_arn ssm_resources = [ "${formatlist("arn:aws:ssm:%s:%s:parameter/%s/*", data.aws_region.default.name, data.aws_caller_identity.default.account_id, var.parameter_groups)}", @@ -23,26 +23,26 @@ module "chamber_user" { } output "chamber_user_name" { - value = "${module.chamber_user.user_name}" + value = module.chamber_user.user_name description = "Normalized IAM user name" } output "chamber_user_arn" { - value = "${module.chamber_user.user_arn}" + value = module.chamber_user.user_arn description = "The ARN assigned by AWS for the user" } output "chamber_user_unique_id" { - value = "${module.chamber_user.user_unique_id}" + value = module.chamber_user.user_unique_id description = "The user unique ID assigned by AWS" } output "chamber_access_key_id" { - value = "${module.chamber_user.access_key_id}" + value = module.chamber_user.access_key_id description = "The access key ID" } output "chamber_secret_access_key" { - value = "${module.chamber_user.secret_access_key}" + value = module.chamber_user.secret_access_key description = "The secret access key. 
This will be written to the state file in plain-text" } diff --git a/deprecated/aws/cis-aggregator-auth/main.tf b/deprecated/aws/cis-aggregator-auth/main.tf index 6f82af27b..0889cf311 100644 --- a/deprecated/aws/cis-aggregator-auth/main.tf +++ b/deprecated/aws/cis-aggregator-auth/main.tf @@ -6,23 +6,23 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } # Define composite variables for resources module "label" { source = "git::https://github.com/cloudposse/terraform-null-label.git?ref=tags/0.5.3" - enabled = "${var.enabled}" - namespace = "${var.namespace}" - name = "${var.name}" - stage = "${var.stage}" - delimiter = "${var.delimiter}" - attributes = "${var.attributes}" - tags = "${var.tags}" + enabled = var.enabled + namespace = var.namespace + name = var.name + stage = var.stage + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags } resource "aws_config_aggregate_authorization" "default" { - account_id = "${var.aggregator_account}" - region = "${var.aggregator_region}" + account_id = var.aggregator_account + region = var.aggregator_region } diff --git a/deprecated/aws/cis-aggregator-auth/output.tf b/deprecated/aws/cis-aggregator-auth/output.tf index 8b1378917..e69de29bb 100644 --- a/deprecated/aws/cis-aggregator-auth/output.tf +++ b/deprecated/aws/cis-aggregator-auth/output.tf @@ -1 +0,0 @@ - diff --git a/deprecated/aws/cis-aggregator-auth/variables.tf b/deprecated/aws/cis-aggregator-auth/variables.tf index 310d3f774..785cd85da 100644 --- a/deprecated/aws/cis-aggregator-auth/variables.tf +++ b/deprecated/aws/cis-aggregator-auth/variables.tf @@ -1,5 +1,5 @@ variable "aws_assume_role_arn" { - type = "string" + type = string } variable "enabled" { @@ -8,12 +8,12 @@ variable "enabled" { } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } @@ -23,25 +23,25 @@ variable "name" { } variable "delimiter" { - type = "string" + type = string default = "-" description = "Delimiter between `name`, `namespace`, `stage` and `attributes`" } variable "attributes" { - type = "list" + type = list(string) description = "Additional attributes (_e.g._ \"1\")" default = [] } variable "tags" { - type = "map" + type = map(string) description = "Additional tags (_e.g._ map(\"BusinessUnit\",\"ABC\")" default = {} } variable "parameters" { - type = "map" + type = map(string) description = "Key-value map of input parameters for the Stack Set template. 
(_e.g._ map(\"BusinessUnit\",\"ABC\")" default = {} } diff --git a/deprecated/aws/cis-aggregator/main.tf b/deprecated/aws/cis-aggregator/main.tf index 8abe4e2a7..7d7afe099 100644 --- a/deprecated/aws/cis-aggregator/main.tf +++ b/deprecated/aws/cis-aggregator/main.tf @@ -6,25 +6,25 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } # Define composite variables for resources module "label" { source = "git::https://github.com/cloudposse/terraform-null-label.git?ref=tags/0.5.3" - enabled = "${var.enabled}" - namespace = "${var.namespace}" - name = "${var.name}" - stage = "${var.stage}" - delimiter = "${var.delimiter}" - attributes = "${var.attributes}" - tags = "${var.tags}" + enabled = var.enabled + namespace = var.namespace + name = var.name + stage = var.stage + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags } resource "aws_config_configuration_aggregator" "default" { - count = "${var.enabled == "true" ? 1 : 0}" - name = "${module.label.id}" + count = var.enabled == "true" ? 1 : 0 + name = module.label.id account_aggregation_source { account_ids = ["${var.accounts}"] diff --git a/deprecated/aws/cis-aggregator/output.tf b/deprecated/aws/cis-aggregator/output.tf index 8b1378917..e69de29bb 100644 --- a/deprecated/aws/cis-aggregator/output.tf +++ b/deprecated/aws/cis-aggregator/output.tf @@ -1 +0,0 @@ - diff --git a/deprecated/aws/cis-aggregator/variables.tf b/deprecated/aws/cis-aggregator/variables.tf index 95a170bb4..666d24b43 100644 --- a/deprecated/aws/cis-aggregator/variables.tf +++ b/deprecated/aws/cis-aggregator/variables.tf @@ -1,5 +1,5 @@ variable "aws_assume_role_arn" { - type = "string" + type = string } variable "enabled" { @@ -8,12 +8,12 @@ variable "enabled" { } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } @@ -23,35 +23,35 @@ variable "name" { } variable "delimiter" { - type = "string" + type = string default = "-" description = "Delimiter between `name`, `namespace`, `stage` and `attributes`" } variable "attributes" { - type = "list" + type = list(string) description = "Additional attributes (_e.g._ \"1\")" default = [] } variable "tags" { - type = "map" + type = map(string) description = "Additional tags (_e.g._ map(\"BusinessUnit\",\"ABC\")" default = {} } variable "parameters" { - type = "map" + type = map(string) description = "Key-value map of input parameters for the Stack Set template. 
(_e.g._ map(\"BusinessUnit\",\"ABC\")" default = {} } variable "accounts" { - type = "list" + type = list(string) default = [] } variable "regions" { - type = "list" + type = list(string) default = [] } diff --git a/deprecated/aws/cis-executor/main.tf b/deprecated/aws/cis-executor/main.tf index f750eb0e8..dd9c743d9 100644 --- a/deprecated/aws/cis-executor/main.tf +++ b/deprecated/aws/cis-executor/main.tf @@ -6,7 +6,7 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } @@ -17,10 +17,10 @@ locals { module "default" { source = "git::https://github.com/cloudposse/terraform-aws-iam-role.git?ref=tags/0.3.0" - enabled = "${var.enabled}" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${local.executor_role_name}" + enabled = var.enabled + namespace = var.namespace + stage = var.stage + name = local.executor_role_name use_fullname = "false" attributes = ["${var.attributes}"] role_description = "IAM Role in all target accounts for Stack Set operations" diff --git a/deprecated/aws/cis-executor/output.tf b/deprecated/aws/cis-executor/output.tf index 8b1378917..e69de29bb 100644 --- a/deprecated/aws/cis-executor/output.tf +++ b/deprecated/aws/cis-executor/output.tf @@ -1 +0,0 @@ - diff --git a/deprecated/aws/cis-executor/variables.tf b/deprecated/aws/cis-executor/variables.tf index c13290666..e713d2042 100644 --- a/deprecated/aws/cis-executor/variables.tf +++ b/deprecated/aws/cis-executor/variables.tf @@ -1,5 +1,5 @@ variable "aws_assume_role_arn" { - type = "string" + type = string } variable "enabled" { @@ -8,29 +8,29 @@ variable "enabled" { } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } variable "delimiter" { - type = "string" + type = string default = "-" description = "Delimiter between `name`, `namespace`, `stage` and `attributes`" } variable "attributes" { - type = "list" + type = list(string) description = "Additional attributes (_e.g._ \"1\")" default = ["executor"] } variable "tags" { - type = "map" + type = map(string) description = "Additional tags (_e.g._ map(\"BusinessUnit\",\"ABC\")" default = {} } diff --git a/deprecated/aws/cis-instances/main.tf b/deprecated/aws/cis-instances/main.tf index 6af3f68bc..3a9a8278b 100644 --- a/deprecated/aws/cis-instances/main.tf +++ b/deprecated/aws/cis-instances/main.tf @@ -6,7 +6,7 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } @@ -20,22 +20,22 @@ data "terraform_remote_state" "cis" { } resource "null_resource" "instances" { - count = "${var.enabled == "true" ? length(keys(var.cis_instances)) : 0}" + count = var.enabled == "true" ? 
length(keys(var.cis_instances)) : 0 triggers { - account = "${join("|", formatlist("%s:%s", element(keys(var.cis_instances), count.index), var.cis_instances[element(keys(var.cis_instances), count.index)]))}" + account = join("|", formatlist("%s:%s", element(keys(var.cis_instances), count.index), var.cis_instances[element(keys(var.cis_instances), count.index)])) } } locals { raw_instances = ["${split("|", join("|", null_resource.instances.*.triggers.account))}"] - instances = "${compact(local.raw_instances)}" + instances = compact(local.raw_instances) } resource "aws_cloudformation_stack_set_instance" "default" { - count = "${var.enabled == "true" && length(local.instances) > 0 ? length(local.instances) : 0}" - stack_set_name = "${data.terraform_remote_state.cis.name}" - account_id = "${element(split(":", element(local.instances, count.index)), 0)}" - region = "${element(split(":", element(local.instances, count.index)), 1)}" - parameter_overrides = "${var.parameters}" + count = var.enabled == "true" && length(local.instances) > 0 ? length(local.instances) : 0 + stack_set_name = data.terraform_remote_state.cis.name + account_id = element(split(":", element(local.instances, count.index)), 0) + region = element(split(":", element(local.instances, count.index)), 1) + parameter_overrides = var.parameters } diff --git a/deprecated/aws/cis-instances/output.tf b/deprecated/aws/cis-instances/output.tf index 8b1378917..e69de29bb 100644 --- a/deprecated/aws/cis-instances/output.tf +++ b/deprecated/aws/cis-instances/output.tf @@ -1 +0,0 @@ - diff --git a/deprecated/aws/cis-instances/variables.tf b/deprecated/aws/cis-instances/variables.tf index 99877542b..2f3995bf8 100644 --- a/deprecated/aws/cis-instances/variables.tf +++ b/deprecated/aws/cis-instances/variables.tf @@ -1,5 +1,5 @@ variable "aws_assume_role_arn" { - type = "string" + type = string } variable "enabled" { @@ -8,12 +8,12 @@ variable "enabled" { } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } @@ -23,30 +23,30 @@ variable "name" { } variable "delimiter" { - type = "string" + type = string default = "-" description = "Delimiter between `name`, `namespace`, `stage` and `attributes`" } variable "attributes" { - type = "list" + type = list(string) description = "Additional attributes (_e.g._ \"1\")" default = [] } variable "tags" { - type = "map" + type = map(string) description = "Additional tags (_e.g._ map(\"BusinessUnit\",\"ABC\")" default = {} } variable "parameters" { - type = "map" + type = map(string) description = "Key-value map of input parameters override for the Stack Set template. 
(_e.g._ map(\"BusinessUnit\",\"ABC\")" default = {} } variable "cis_instances" { - type = "map" + type = map(string) default = {} } diff --git a/deprecated/aws/cis/main.tf b/deprecated/aws/cis/main.tf index 9b376a29b..6437148ee 100644 --- a/deprecated/aws/cis/main.tf +++ b/deprecated/aws/cis/main.tf @@ -6,7 +6,7 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } @@ -18,13 +18,13 @@ locals { module "default" { source = "git::https://github.com/cloudposse/terraform-aws-cloudformation-stack-set.git?ref=tags/0.1.0" - enabled = "${var.enabled}" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" + enabled = var.enabled + namespace = var.namespace + stage = var.stage + name = var.name attributes = ["${var.attributes}"] - parameters = "${var.parameters}" - template_url = "${local.template_url}" - executor_role_name = "${local.executor_role_name}" - capabilities = "${var.capabilities}" + parameters = var.parameters + template_url = local.template_url + executor_role_name = local.executor_role_name + capabilities = var.capabilities } diff --git a/deprecated/aws/cis/output.tf b/deprecated/aws/cis/output.tf index 08c124c62..bc6089617 100644 --- a/deprecated/aws/cis/output.tf +++ b/deprecated/aws/cis/output.tf @@ -1,11 +1,11 @@ output "administrator_role_arn" { - value = "${module.default.administrator_role_arn}" + value = module.default.administrator_role_arn } output "executor_role_name" { - value = "${module.default.executor_role_name}" + value = module.default.executor_role_name } output "name" { - value = "${module.default.name}" + value = module.default.name } diff --git a/deprecated/aws/cis/variables.tf b/deprecated/aws/cis/variables.tf index 797c98ffb..0e51a7b26 100644 --- a/deprecated/aws/cis/variables.tf +++ b/deprecated/aws/cis/variables.tf @@ -1,5 +1,5 @@ variable "aws_assume_role_arn" { - type = "string" + type = string } variable "enabled" { @@ -8,12 +8,12 @@ variable "enabled" { } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } @@ -23,31 +23,31 @@ variable "name" { } variable "delimiter" { - type = "string" + type = string default = "-" description = "Delimiter between `name`, `namespace`, `stage` and `attributes`" } variable "attributes" { - type = "list" + type = list(string) description = "Additional attributes (_e.g._ \"1\")" default = [] } variable "tags" { - type = "map" + type = map(string) description = "Additional tags (_e.g._ map(\"BusinessUnit\",\"ABC\")" default = {} } variable "parameters" { - type = "map" + type = map(string) description = "Key-value map of input parameters for the Stack Set template. (_e.g._ map(\"BusinessUnit\",\"ABC\")" default = {} } variable "capabilities" { - type = "list" + type = list(string) description = "A list of capabilities. 
Valid values: CAPABILITY_IAM, CAPABILITY_NAMED_IAM, CAPABILITY_AUTO_EXPAND" default = [] } diff --git a/deprecated/aws/cloudtrail/cloudwatch_logs.tf b/deprecated/aws/cloudtrail/cloudwatch_logs.tf index a963ad9d6..23135b8df 100644 --- a/deprecated/aws/cloudtrail/cloudwatch_logs.tf +++ b/deprecated/aws/cloudtrail/cloudwatch_logs.tf @@ -1,11 +1,11 @@ module "logs" { source = "git::https://github.com/cloudposse/terraform-aws-cloudwatch-logs.git?ref=tags/0.3.0" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" + namespace = var.namespace + stage = var.stage + name = var.name attributes = ["cloudwatch", "logs"] - retention_in_days = "${var.cloudwatch_logs_retention_in_days}" + retention_in_days = var.cloudwatch_logs_retention_in_days principals = { Service = ["cloudtrail.amazonaws.com"] @@ -18,16 +18,16 @@ module "logs" { module "kms_key_logs" { source = "git::https://github.com/cloudposse/terraform-aws-kms-key.git?ref=tags/0.1.3" - namespace = "${var.namespace}" - name = "${var.name}" - stage = "${var.stage}" + namespace = var.namespace + name = var.name + stage = var.stage attributes = ["cloudwatch", "logs"] description = "KMS key for CloudWatch" deletion_window_in_days = 10 enable_key_rotation = "true" - policy = "${data.aws_iam_policy_document.kms_key_logs.json}" + policy = data.aws_iam_policy_document.kms_key_logs.json } data "aws_iam_policy_document" "kms_key_logs" { diff --git a/deprecated/aws/cloudtrail/main.tf b/deprecated/aws/cloudtrail/main.tf index 2ac7db661..777ac73c7 100644 --- a/deprecated/aws/cloudtrail/main.tf +++ b/deprecated/aws/cloudtrail/main.tf @@ -5,38 +5,38 @@ terraform { } variable "aws_assume_role_arn" { - type = "string" + type = string } provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } variable "name" { - type = "string" + type = string description = "Name (e.g. `account`)" default = "account" } variable "kms_key_arn" { - type = "string" + type = string description = "" } variable "region" { - type = "string" + type = string description = "AWS region" default = "" } @@ -49,20 +49,20 @@ variable "cloudwatch_logs_retention_in_days" { data "aws_region" "default" {} locals { - region = "${length(var.region) > 0 ? var.region : data.aws_region.default.name}" + region = length(var.region) > 0 ? 
var.region : data.aws_region.default.name } module "cloudtrail" { source = "git::https://github.com/cloudposse/terraform-aws-cloudtrail.git?ref=tags/0.7.1" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" + namespace = var.namespace + stage = var.stage + name = var.name enable_logging = "true" enable_log_file_validation = "true" include_global_service_events = "true" is_multi_region_trail = "true" s3_bucket_name = "${var.namespace}-audit-account" - kms_key_arn = "${var.kms_key_arn}" - cloud_watch_logs_group_arn = "${module.logs.log_group_arn}" - cloud_watch_logs_role_arn = "${module.logs.role_arn}" + kms_key_arn = var.kms_key_arn + cloud_watch_logs_group_arn = module.logs.log_group_arn + cloud_watch_logs_role_arn = module.logs.role_arn } diff --git a/deprecated/aws/codefresh-onprem/kops-metadata.tf b/deprecated/aws/codefresh-onprem/kops-metadata.tf index ea764cfb6..9433f8a5f 100644 --- a/deprecated/aws/codefresh-onprem/kops-metadata.tf +++ b/deprecated/aws/codefresh-onprem/kops-metadata.tf @@ -1,11 +1,11 @@ variable "kops_metadata_enabled" { description = "Set to false to prevent the module from creating any resources" - type = "string" + type = string default = "false" } module "kops_metadata" { source = "git::https://github.com/cloudposse/terraform-aws-kops-metadata.git?ref=tags/0.2.0" dns_zone = "${var.region}.${var.zone_name}" - enabled = "${var.kops_metadata_enabled}" + enabled = var.kops_metadata_enabled } diff --git a/deprecated/aws/codefresh-onprem/main.tf b/deprecated/aws/codefresh-onprem/main.tf index 2d9ec2111..f55264d58 100644 --- a/deprecated/aws/codefresh-onprem/main.tf +++ b/deprecated/aws/codefresh-onprem/main.tf @@ -6,31 +6,31 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } variable "aws_assume_role_arn" { - type = "string" + type = string } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `eg` or `cp`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } variable "region" { - type = "string" + type = string description = "AWS region" } variable "zone_name" { - type = "string" + type = string description = "DNS zone name" } @@ -44,25 +44,25 @@ variable "acm_primary_domain" { } variable "acm_san_domains" { - type = "list" + type = list(string) default = [] description = "A list of domains that should be SANs in the issued certificate" } variable "acm_zone_name" { - type = "string" + type = string default = "" description = "The name of the desired Route53 Hosted Zone" } variable "redis_cluster_enabled" { - type = "string" + type = string default = "false" description = "Set to false to prevent the module from creating any resources" } variable "postgres_cluster_enabled" { - type = "string" + type = string default = "false" description = "Set to false to prevent the module from creating any resources" } @@ -73,49 +73,49 @@ variable "documentdb_cluster_enabled" { } variable "documentdb_instance_class" { - type = "string" + type = string default = "db.r4.large" description = "The instance class to use. 
For more details, see https://docs.aws.amazon.com/documentdb/latest/developerguide/db-instance-classes.html#db-instance-class-specs" } variable "documentdb_cluster_size" { - type = "string" + type = string default = "3" description = "Number of DocumentDB instances to create in the cluster" } variable "documentdb_port" { - type = "string" + type = string default = "27017" description = "DocumentDB port" } variable "documentdb_master_username" { - type = "string" + type = string default = "" description = "Username for the master DocumentDB user. If left empty, will be generated automatically" } variable "documentdb_master_password" { - type = "string" + type = string default = "" description = "Password for the master DocumentDB user. If left empty, will be generated automatically. Note that this may show up in logs, and it will be stored in the state file" } variable "documentdb_retention_period" { - type = "string" + type = string default = "5" description = "Number of days to retain DocumentDB backups for" } variable "documentdb_preferred_backup_window" { - type = "string" + type = string default = "07:00-09:00" description = "Daily time range during which the DocumentDB backups happen" } variable "documentdb_cluster_parameters" { - type = "list" + type = list(string) default = [ { @@ -128,19 +128,19 @@ variable "documentdb_cluster_parameters" { } variable "documentdb_cluster_family" { - type = "string" + type = string default = "docdb3.6" description = "The family of the DocumentDB cluster parameter group. For more details, see https://docs.aws.amazon.com/documentdb/latest/developerguide/db-cluster-parameter-group-create.html" } variable "documentdb_engine" { - type = "string" + type = string default = "docdb" description = "The name of the database engine to be used for DocumentDB cluster. Defaults to `docdb`. Valid values: `docdb`" } variable "documentdb_engine_version" { - type = "string" + type = string default = "" description = "The version number of the DocumentDB database engine to use" } @@ -161,13 +161,13 @@ variable "documentdb_apply_immediately" { } variable "documentdb_enabled_cloudwatch_logs_exports" { - type = "list" + type = list(string) description = "List of DocumentDB log types to export to CloudWatch. 
The following log types are supported: audit, error, general, slowquery" default = [] } variable "documentdb_chamber_parameters_mapping" { - type = "map" + type = map(string) default = { documentdb_connection_uri = "MONGODB_URI" @@ -187,187 +187,187 @@ data "terraform_remote_state" "backing_services" { module "codefresh_enterprise_backing_services" { source = "git::https://github.com/cloudposse/terraform-aws-codefresh-backing-services.git?ref=tags/0.8.0" - namespace = "${var.namespace}" - stage = "${var.stage}" - vpc_id = "${data.terraform_remote_state.backing_services.vpc_id}" + namespace = var.namespace + stage = var.stage + vpc_id = data.terraform_remote_state.backing_services.vpc_id subnet_ids = ["${data.terraform_remote_state.backing_services.private_subnet_ids}"] security_groups = ["${module.kops_metadata.nodes_security_group_id}"] - zone_name = "${var.zone_name}" + zone_name = var.zone_name chamber_service = "codefresh" - acm_enabled = "${var.acm_enabled}" - acm_primary_domain = "${var.acm_primary_domain}" + acm_enabled = var.acm_enabled + acm_primary_domain = var.acm_primary_domain acm_san_domains = ["${var.acm_san_domains}"] - redis_cluster_enabled = "${var.redis_cluster_enabled}" + redis_cluster_enabled = var.redis_cluster_enabled - postgres_cluster_enabled = "${var.postgres_cluster_enabled}" + postgres_cluster_enabled = var.postgres_cluster_enabled # DocumentDB - documentdb_cluster_enabled = "${var.documentdb_cluster_enabled}" - documentdb_instance_class = "${var.documentdb_instance_class}" - documentdb_cluster_size = "${var.documentdb_cluster_size}" - documentdb_port = "${var.documentdb_port}" - documentdb_master_username = "${var.documentdb_master_username}" - documentdb_master_password = "${var.documentdb_master_password}" - documentdb_retention_period = "${var.documentdb_retention_period}" - documentdb_preferred_backup_window = "${var.documentdb_preferred_backup_window}" + documentdb_cluster_enabled = var.documentdb_cluster_enabled + documentdb_instance_class = var.documentdb_instance_class + documentdb_cluster_size = var.documentdb_cluster_size + documentdb_port = var.documentdb_port + documentdb_master_username = var.documentdb_master_username + documentdb_master_password = var.documentdb_master_password + documentdb_retention_period = var.documentdb_retention_period + documentdb_preferred_backup_window = var.documentdb_preferred_backup_window documentdb_cluster_parameters = ["${var.documentdb_cluster_parameters}"] - documentdb_cluster_family = "${var.documentdb_cluster_family}" - documentdb_engine = "${var.documentdb_engine}" - documentdb_engine_version = "${var.documentdb_engine_version}" - documentdb_storage_encrypted = "${var.documentdb_storage_encrypted}" - documentdb_skip_final_snapshot = "${var.documentdb_skip_final_snapshot}" - documentdb_apply_immediately = "${var.documentdb_apply_immediately}" + documentdb_cluster_family = var.documentdb_cluster_family + documentdb_engine = var.documentdb_engine + documentdb_engine_version = var.documentdb_engine_version + documentdb_storage_encrypted = var.documentdb_storage_encrypted + documentdb_skip_final_snapshot = var.documentdb_skip_final_snapshot + documentdb_apply_immediately = var.documentdb_apply_immediately documentdb_enabled_cloudwatch_logs_exports = ["${var.documentdb_enabled_cloudwatch_logs_exports}"] - documentdb_chamber_parameters_mapping = "${var.documentdb_chamber_parameters_mapping}" + documentdb_chamber_parameters_mapping = var.documentdb_chamber_parameters_mapping } output "elasticache_redis_id" { - value = 
"${module.codefresh_enterprise_backing_services.elasticache_redis_id}" + value = module.codefresh_enterprise_backing_services.elasticache_redis_id description = "Elasticache Redis cluster ID" } output "elasticache_redis_security_group_id" { - value = "${module.codefresh_enterprise_backing_services.elasticache_redis_security_group_id}" + value = module.codefresh_enterprise_backing_services.elasticache_redis_security_group_id description = "Elasticache Redis security group ID" } output "elasticache_redis_host" { - value = "${module.codefresh_enterprise_backing_services.elasticache_redis_host}" + value = module.codefresh_enterprise_backing_services.elasticache_redis_host description = "Elasticache Redis host" } output "aurora_postgres_database_name" { - value = "${module.codefresh_enterprise_backing_services.aurora_postgres_database_name}" + value = module.codefresh_enterprise_backing_services.aurora_postgres_database_name description = "Aurora Postgres Database name" } output "aurora_postgres_master_username" { - value = "${module.codefresh_enterprise_backing_services.aurora_postgres_master_username}" + value = module.codefresh_enterprise_backing_services.aurora_postgres_master_username description = "Aurora Postgres Username for the master DB user" } output "aurora_postgres_master_hostname" { - value = "${module.codefresh_enterprise_backing_services.aurora_postgres_master_hostname}" + value = module.codefresh_enterprise_backing_services.aurora_postgres_master_hostname description = "Aurora Postgres DB Master hostname" } output "aurora_postgres_replicas_hostname" { - value = "${module.codefresh_enterprise_backing_services.aurora_postgres_replicas_hostname}" + value = module.codefresh_enterprise_backing_services.aurora_postgres_replicas_hostname description = "Aurora Postgres Replicas hostname" } output "aurora_postgres_cluster_name" { - value = "${module.codefresh_enterprise_backing_services.aurora_postgres_cluster_name}" + value = module.codefresh_enterprise_backing_services.aurora_postgres_cluster_name description = "Aurora Postgres Cluster Identifier" } output "s3_user_name" { - value = "${module.codefresh_enterprise_backing_services.s3_user_name}" + value = module.codefresh_enterprise_backing_services.s3_user_name description = "Normalized IAM user name" } output "s3_user_arn" { - value = "${module.codefresh_enterprise_backing_services.s3_user_arn}" + value = module.codefresh_enterprise_backing_services.s3_user_arn description = "The ARN assigned by AWS for the user" } output "s3_user_unique_id" { - value = "${module.codefresh_enterprise_backing_services.s3_user_unique_id}" + value = module.codefresh_enterprise_backing_services.s3_user_unique_id description = "The user unique ID assigned by AWS" } output "s3_access_key_id" { sensitive = true - value = "${module.codefresh_enterprise_backing_services.s3_access_key_id}" + value = module.codefresh_enterprise_backing_services.s3_access_key_id description = "The access key ID" } output "s3_secret_access_key" { sensitive = true - value = "${module.codefresh_enterprise_backing_services.s3_secret_access_key}" + value = module.codefresh_enterprise_backing_services.s3_secret_access_key description = "The secret access key. 
This will be written to the state file in plain-text" } output "s3_bucket_arn" { - value = "${module.codefresh_enterprise_backing_services.s3_bucket_arn}" + value = module.codefresh_enterprise_backing_services.s3_bucket_arn description = "The s3 bucket ARN" } output "backup_s3_user_name" { - value = "${module.codefresh_enterprise_backing_services.backup_s3_user_name}" + value = module.codefresh_enterprise_backing_services.backup_s3_user_name description = "Normalized IAM user name" } output "backup_s3_user_arn" { - value = "${module.codefresh_enterprise_backing_services.backup_s3_user_arn}" + value = module.codefresh_enterprise_backing_services.backup_s3_user_arn description = "The ARN assigned by AWS for the user" } output "backup_s3_user_unique_id" { - value = "${module.codefresh_enterprise_backing_services.backup_s3_user_unique_id}" + value = module.codefresh_enterprise_backing_services.backup_s3_user_unique_id description = "The user unique ID assigned by AWS" } output "backup_s3_access_key_id" { sensitive = true - value = "${module.codefresh_enterprise_backing_services.backup_s3_access_key_id}" + value = module.codefresh_enterprise_backing_services.backup_s3_access_key_id description = "The access key ID" } output "backup_s3_secret_access_key" { sensitive = true - value = "${module.codefresh_enterprise_backing_services.backup_s3_secret_access_key}" + value = module.codefresh_enterprise_backing_services.backup_s3_secret_access_key description = "The secret access key. This will be written to the state file in plain-text" } output "backup_s3_bucket_arn" { - value = "${module.codefresh_enterprise_backing_services.backup_s3_bucket_arn}" + value = module.codefresh_enterprise_backing_services.backup_s3_bucket_arn description = "The backup_s3 bucket ARN" } output "acm_arn" { - value = "${module.codefresh_enterprise_backing_services.acm_arn}" + value = module.codefresh_enterprise_backing_services.acm_arn description = "The ARN of the certificate" } output "acm_domain_validation_options" { - value = "${module.codefresh_enterprise_backing_services.acm_domain_validation_options}" + value = module.codefresh_enterprise_backing_services.acm_domain_validation_options description = "CNAME records that are added to the DNS zone to complete certificate validation" } output "documentdb_master_username" { - value = "${module.codefresh_enterprise_backing_services.documentdb_master_username}" + value = module.codefresh_enterprise_backing_services.documentdb_master_username description = "DocumentDB Username for the master DB user" } output "documentdb_cluster_name" { - value = "${module.codefresh_enterprise_backing_services.documentdb_cluster_name}" + value = module.codefresh_enterprise_backing_services.documentdb_cluster_name description = "DocumentDB Cluster Identifier" } output "documentdb_arn" { - value = "${module.codefresh_enterprise_backing_services.documentdb_arn}" + value = module.codefresh_enterprise_backing_services.documentdb_arn description = "Amazon Resource Name (ARN) of the DocumentDB cluster" } output "documentdb_endpoint" { - value = "${module.codefresh_enterprise_backing_services.documentdb_endpoint}" + value = module.codefresh_enterprise_backing_services.documentdb_endpoint description = "Endpoint of the DocumentDB cluster" } output "documentdb_reader_endpoint" { - value = "${module.codefresh_enterprise_backing_services.documentdb_reader_endpoint}" + value = module.codefresh_enterprise_backing_services.documentdb_reader_endpoint description = "Read-only endpoint of the DocumentDB 
cluster, automatically load-balanced across replicas" } output "documentdb_master_host" { - value = "${module.codefresh_enterprise_backing_services.documentdb_master_host}" + value = module.codefresh_enterprise_backing_services.documentdb_master_host description = "DocumentDB master hostname" } output "documentdb_replicas_host" { - value = "${module.codefresh_enterprise_backing_services.documentdb_replicas_host}" + value = module.codefresh_enterprise_backing_services.documentdb_replicas_host description = "DocumentDB replicas hostname" } diff --git a/deprecated/aws/datadog/main.tf b/deprecated/aws/datadog/main.tf index 650d3cd81..d69999239 100644 --- a/deprecated/aws/datadog/main.tf +++ b/deprecated/aws/datadog/main.tf @@ -5,12 +5,12 @@ terraform { } variable "aws_assume_role_arn" { - type = "string" + type = string } provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } @@ -21,15 +21,15 @@ module "datadog_ids" { module "datadog_aws_integration" { source = "git::https://github.com/cloudposse/terraform-datadog-aws-integration.git?ref=tags/0.2.0" - namespace = "${var.namespace}" - stage = "${var.stage}" + namespace = var.namespace + stage = var.stage name = "datadog" - datadog_external_id = "${lookup(module.datadog_ids.map, "/datadog/datadog_external_id")}" - integrations = "${var.integrations}" + datadog_external_id = lookup(module.datadog_ids.map, "/datadog/datadog_external_id") + integrations = var.integrations } locals { - chamber_service = "${var.chamber_service == "" ? basename(pathexpand(path.module)) : var.chamber_service}" + chamber_service = var.chamber_service == "" ? basename(pathexpand(path.module)) : var.chamber_service } resource "random_string" "tokens" { @@ -39,12 +39,12 @@ resource "random_string" "tokens" { number = false special = false - keepers = "${module.datadog_ids.map}" + keepers = module.datadog_ids.map } resource "aws_ssm_parameter" "datadog_cluster_agent_token" { - name = "${format(var.chamber_parameter_name, local.chamber_service, "datadog_cluster_agent_token")}" - value = "${random_string.tokens.result}" + name = format(var.chamber_parameter_name, local.chamber_service, "datadog_cluster_agent_token") + value = random_string.tokens.result description = "A cluster-internal secret for agent-to-agent communication. Must be 32+ characters a-zA-Z" type = "String" overwrite = "true" diff --git a/deprecated/aws/datadog/variables.tf b/deprecated/aws/datadog/variables.tf index 63fd6a651..67c9153a3 100644 --- a/deprecated/aws/datadog/variables.tf +++ b/deprecated/aws/datadog/variables.tf @@ -7,7 +7,7 @@ variable "stage" { } variable "integrations" { - type = "list" + type = list(string) description = "List of integration names with permissions to apply (`all`, `core`, `rds`)" } diff --git a/deprecated/aws/docs/main.tf b/deprecated/aws/docs/main.tf index a4e39587d..03034d88e 100644 --- a/deprecated/aws/docs/main.tf +++ b/deprecated/aws/docs/main.tf @@ -5,36 +5,36 @@ terraform { } variable "aws_assume_role_arn" { - type = "string" + type = string } variable "domain_name" { - type = "string" + type = string } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. 
`prod`, `dev`, `staging`)" } variable "region" { - type = "string" + type = string description = "AWS region" } variable "account_id" { - type = "string" + type = string description = "AWS account ID" } provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } @@ -43,14 +43,14 @@ provider "aws" { region = "us-east-1" assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } # https://www.terraform.io/docs/providers/aws/d/acm_certificate.html data "aws_acm_certificate" "acm_cloudfront_certificate" { provider = "aws.virginia" - domain = "${var.domain_name}" + domain = var.domain_name statuses = ["ISSUED"] types = ["AMAZON_ISSUED"] } @@ -63,19 +63,19 @@ locals { module "docs_user" { source = "git::https://github.com/cloudposse/terraform-aws-iam-system-user.git?ref=tags/0.2.2" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${local.name}" + namespace = var.namespace + stage = var.stage + name = local.name } module "origin" { source = "git::https://github.com/cloudposse/terraform-aws-s3-website.git?ref=tags/0.5.2" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${local.name}" - hostname = "${local.cdn_domain}" - parent_zone_name = "${var.domain_name}" - region = "${var.region}" + namespace = var.namespace + stage = var.stage + name = local.name + hostname = local.cdn_domain + parent_zone_name = var.domain_name + region = var.region cors_allowed_headers = ["*"] cors_allowed_methods = ["GET"] cors_allowed_origins = ["*"] @@ -100,14 +100,14 @@ module "origin" { # CloudFront CDN fronting origin module "cdn" { source = "git::https://github.com/cloudposse/terraform-aws-cloudfront-cdn.git?ref=tags/0.4.0" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${local.name}" + namespace = var.namespace + stage = var.stage + name = local.name aliases = ["${local.cdn_domain}", "docs.cloudposse.com"] - origin_domain_name = "${module.origin.s3_bucket_website_endpoint}" + origin_domain_name = module.origin.s3_bucket_website_endpoint origin_protocol_policy = "http-only" viewer_protocol_policy = "redirect-to-https" - parent_zone_name = "${var.domain_name}" + parent_zone_name = var.domain_name forward_cookies = "none" forward_headers = ["Origin", "Access-Control-Request-Headers", "Access-Control-Request-Method"] default_ttl = 60 @@ -118,5 +118,5 @@ module "cdn" { allowed_methods = ["GET", "HEAD", "OPTIONS"] price_class = "PriceClass_All" default_root_object = "index.html" - acm_certificate_arn = "${data.aws_acm_certificate.acm_cloudfront_certificate.arn}" + acm_certificate_arn = data.aws_acm_certificate.acm_cloudfront_certificate.arn } diff --git a/deprecated/aws/docs/outputs.tf b/deprecated/aws/docs/outputs.tf index 6e9724b43..4409c0281 100644 --- a/deprecated/aws/docs/outputs.tf +++ b/deprecated/aws/docs/outputs.tf @@ -1,80 +1,80 @@ output "docs_user_name" { - value = "${module.docs_user.user_name}" + value = module.docs_user.user_name description = "Normalized IAM user name" } output "docs_user_arn" { - value = "${module.docs_user.user_arn}" + value = module.docs_user.user_arn description = "The ARN assigned by AWS for the user" } output "docs_user_unique_id" { - value = "${module.docs_user.user_unique_id}" + value = module.docs_user.user_unique_id description = "The user unique ID assigned by AWS" } output "docs_user_access_key_id" { - value = "${module.docs_user.access_key_id}" + value = module.docs_user.access_key_id description = "The access key ID" } output 
"docs_user_secret_access_key" { - value = "${module.docs_user.secret_access_key}" + value = module.docs_user.secret_access_key description = "The secret access key. This will be written to the state file in plain-text" } output "docs_s3_bucket_name" { - value = "${module.origin.s3_bucket_name}" + value = module.origin.s3_bucket_name } output "docs_s3_bucket_domain_name" { - value = "${module.origin.s3_bucket_domain_name}" + value = module.origin.s3_bucket_domain_name } output "docs_s3_bucket_arn" { - value = "${module.origin.s3_bucket_arn}" + value = module.origin.s3_bucket_arn } output "docs_s3_bucket_website_endpoint" { - value = "${module.origin.s3_bucket_website_endpoint}" + value = module.origin.s3_bucket_website_endpoint } output "docs_s3_bucket_website_domain" { - value = "${module.origin.s3_bucket_website_domain}" + value = module.origin.s3_bucket_website_domain } output "docs_s3_bucket_hosted_zone_id" { - value = "${module.origin.s3_bucket_hosted_zone_id}" + value = module.origin.s3_bucket_hosted_zone_id } output "docs_cloudfront_id" { - value = "${module.cdn.cf_id}" + value = module.cdn.cf_id } output "docs_cloudfront_arn" { - value = "${module.cdn.cf_arn}" + value = module.cdn.cf_arn } output "docs_cloudfront_aliases" { - value = "${module.cdn.cf_aliases}" + value = module.cdn.cf_aliases } output "docs_cloudfront_status" { - value = "${module.cdn.cf_status}" + value = module.cdn.cf_status } output "docs_cloudfront_domain_name" { - value = "${module.cdn.cf_domain_name}" + value = module.cdn.cf_domain_name } output "docs_cloudfront_etag" { - value = "${module.cdn.cf_etag}" + value = module.cdn.cf_etag } output "docs_cloudfront_hosted_zone_id" { - value = "${module.cdn.cf_hosted_zone_id}" + value = module.cdn.cf_hosted_zone_id } output "docs_cloudfront_origin_access_identity_path" { - value = "${module.cdn.cf_origin_access_identity}" + value = module.cdn.cf_origin_access_identity } diff --git a/deprecated/aws/ecr/kops_ecr_app.tf b/deprecated/aws/ecr/kops_ecr_app.tf index ef5ff0d11..05c2768d9 100644 --- a/deprecated/aws/ecr/kops_ecr_app.tf +++ b/deprecated/aws/ecr/kops_ecr_app.tf @@ -10,29 +10,29 @@ variable "kops_ecr_app_enabled" { module "kops_ecr_app" { source = "git::https://github.com/cloudposse/terraform-aws-ecr.git?ref=tags/0.6.1" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.kops_ecr_app_repository_name}" + namespace = var.namespace + stage = var.stage + name = var.kops_ecr_app_repository_name - enabled = "${var.kops_ecr_app_enabled}" + enabled = var.kops_ecr_app_enabled principals_full_access = ["${local.principals_full_access}"] principals_readonly_access = ["${local.principals_readonly_access}"] - tags = "${module.label.tags}" + tags = module.label.tags } output "kops_ecr_app_registry_id" { - value = "${module.kops_ecr_app.registry_id}" + value = module.kops_ecr_app.registry_id description = "Registry app ID" } output "kops_ecr_app_registry_url" { - value = "${module.kops_ecr_app.registry_url}" + value = module.kops_ecr_app.registry_url description = "Registry app URL" } output "kops_ecr_app_repository_name" { - value = "${module.kops_ecr_app.repository_name}" + value = module.kops_ecr_app.repository_name description = "Registry app name" } diff --git a/deprecated/aws/ecr/kops_ecr_user.tf b/deprecated/aws/ecr/kops_ecr_user.tf index 2b5755311..a7547c386 100644 --- a/deprecated/aws/ecr/kops_ecr_user.tf +++ b/deprecated/aws/ecr/kops_ecr_user.tf @@ -1,10 +1,10 @@ module "kops_ecr_user" { source = 
"git::https://github.com/cloudposse/terraform-aws-iam-system-user.git?ref=tags/0.3.0" - namespace = "${var.namespace}" - stage = "${var.stage}" + namespace = var.namespace + stage = var.stage name = "cicd" - tags = "${module.label.tags}" + tags = module.label.tags } data "aws_iam_policy_document" "login" { @@ -17,38 +17,38 @@ data "aws_iam_policy_document" "login" { } resource "aws_iam_policy" "login" { - name = "${module.label.id}" - policy = "${data.aws_iam_policy_document.login.json}" + name = module.label.id + policy = data.aws_iam_policy_document.login.json } resource "aws_iam_user_policy_attachment" "user_login" { - user = "${module.kops_ecr_user.user_name}" - policy_arn = "${aws_iam_policy.login.arn}" + user = module.kops_ecr_user.user_name + policy_arn = aws_iam_policy.login.arn } output "kops_ecr_user_name" { - value = "${module.kops_ecr_user.user_name}" + value = module.kops_ecr_user.user_name description = "Normalized IAM user name" } output "kops_ecr_user_arn" { - value = "${module.kops_ecr_user.user_arn}" + value = module.kops_ecr_user.user_arn description = "The ARN assigned by AWS for the user" } output "kops_ecr_user_unique_id" { - value = "${module.kops_ecr_user.user_unique_id}" + value = module.kops_ecr_user.user_unique_id description = "The user unique ID assigned by AWS" } output "kops_ecr_user_access_key_id" { sensitive = true - value = "${module.kops_ecr_user.access_key_id}" + value = module.kops_ecr_user.access_key_id description = "The access key ID" } output "kops_ecr_user_secret_access_key" { sensitive = true - value = "${module.kops_ecr_user.secret_access_key}" + value = module.kops_ecr_user.secret_access_key description = "The secret access key. This will be written to the state file in plain-text" } diff --git a/deprecated/aws/ecr/main.tf b/deprecated/aws/ecr/main.tf index 425dc3374..9ea6280f8 100644 --- a/deprecated/aws/ecr/main.tf +++ b/deprecated/aws/ecr/main.tf @@ -5,39 +5,39 @@ terraform { } variable "aws_assume_role_arn" { - type = "string" + type = string } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. 
`prod`, `dev`, `staging`)" } variable "cluster_name" { - type = "string" + type = string description = "kops cluster name" } variable "external_principals_full_access" { - type = "list" + type = list(string) description = "Principal ARN to provide with full access to the ECR" default = [] } variable "external_principals_readonly_access" { - type = "list" + type = list(string) description = "Principal ARN to provide with readonly access to the ECR" default = [] } provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } @@ -48,13 +48,13 @@ locals { module "label" { source = "git::https://github.com/cloudposse/terraform-null-label.git?ref=tags/0.5.4" - namespace = "${var.namespace}" - stage = "${var.stage}" + namespace = var.namespace + stage = var.stage name = "ecr" - tags = "${map("Cluster", var.cluster_name)}" + tags = map("Cluster", var.cluster_name) } module "kops_metadata" { source = "git::https://github.com/cloudposse/terraform-aws-kops-data-iam.git?ref=tags/0.1.0" - cluster_name = "${var.cluster_name}" + cluster_name = var.cluster_name } diff --git a/deprecated/aws/eks-backing-services-peering/main.tf b/deprecated/aws/eks-backing-services-peering/main.tf index 66e036e8c..0dea296dd 100644 --- a/deprecated/aws/eks-backing-services-peering/main.tf +++ b/deprecated/aws/eks-backing-services-peering/main.tf @@ -16,12 +16,12 @@ data "aws_vpc" "backing_services_vpc" { module "vpc_peering" { source = "git::https://github.com/cloudposse/terraform-aws-vpc-peering.git?ref=tags/0.1.2" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" - delimiter = "${var.delimiter}" + namespace = var.namespace + stage = var.stage + name = var.name + delimiter = var.delimiter attributes = ["${compact(concat(var.attributes, list("peering")))}"] - tags = "${var.tags}" - requestor_vpc_id = "${data.aws_vpc.eks_vpc.id}" - acceptor_vpc_id = "${data.aws_vpc.backing_services_vpc.id}" + tags = var.tags + requestor_vpc_id = data.aws_vpc.eks_vpc.id + acceptor_vpc_id = data.aws_vpc.backing_services_vpc.id } diff --git a/deprecated/aws/eks-backing-services-peering/outputs.tf b/deprecated/aws/eks-backing-services-peering/outputs.tf index e5bed5be0..2f7ec3399 100644 --- a/deprecated/aws/eks-backing-services-peering/outputs.tf +++ b/deprecated/aws/eks-backing-services-peering/outputs.tf @@ -1,9 +1,9 @@ output "vpc_peering_connection_id" { - value = "${module.vpc_peering.connection_id}" + value = module.vpc_peering.connection_id description = "VPC peering connection ID" } output "vpc_peering_accept_status" { - value = "${module.vpc_peering.accept_status}" + value = module.vpc_peering.accept_status description = "The status of the VPC peering connection request" } diff --git a/deprecated/aws/eks-backing-services-peering/variables.tf b/deprecated/aws/eks-backing-services-peering/variables.tf index 61dca67f8..96bc04012 100644 --- a/deprecated/aws/eks-backing-services-peering/variables.tf +++ b/deprecated/aws/eks-backing-services-peering/variables.tf @@ -1,33 +1,33 @@ variable "namespace" { - type = "string" + type = string description = "Namespace, which could be your organization name, e.g. 'eg' or 'cp'" } variable "stage" { - type = "string" + type = string description = "Stage, e.g. 'prod', 'staging', 'dev' or 'testing'" } variable "name" { - type = "string" + type = string default = "eks" description = "Solution name, e.g. 
'app' or 'cluster'" } variable "delimiter" { - type = "string" + type = string default = "-" description = "Delimiter to be used between `name`, `namespace`, `stage`, etc." } variable "attributes" { - type = "list" + type = list(string) default = [] description = "Additional attributes (e.g. `1`)" } variable "tags" { - type = "map" + type = map(string) default = {} description = "Additional tags (e.g. `map('BusinessUnit`,`XYZ`)" } diff --git a/deprecated/aws/eks/eks.tf b/deprecated/aws/eks/eks.tf index fe11eee22..27d70437e 100644 --- a/deprecated/aws/eks/eks.tf +++ b/deprecated/aws/eks/eks.tf @@ -1,89 +1,89 @@ module "label" { source = "git::https://github.com/cloudposse/terraform-terraform-label.git?ref=tags/0.1.6" - namespace = "${var.namespace}" - name = "${var.name}" - stage = "${var.stage}" - delimiter = "${var.delimiter}" - attributes = "${var.attributes}" - tags = "${var.tags}" - enabled = "${var.enabled}" + namespace = var.namespace + name = var.name + stage = var.stage + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + enabled = var.enabled } locals { # The usage of the specific kubernetes.io/cluster/* resource tags below are required # for EKS and Kubernetes to discover and manage networking resources # https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html#base-vpc-networking - tags = "${merge(var.tags, map("kubernetes.io/cluster/${module.label.id}", "shared"))}" + tags = merge(var.tags, map("kubernetes.io/cluster/${module.label.id}", "shared")) } data "aws_availability_zones" "available" {} module "vpc" { source = "git::https://github.com/cloudposse/terraform-aws-vpc.git?ref=tags/0.3.4" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" - attributes = "${var.attributes}" - tags = "${local.tags}" - cidr_block = "${var.vpc_cidr_block}" + namespace = var.namespace + stage = var.stage + name = var.name + attributes = var.attributes + tags = local.tags + cidr_block = var.vpc_cidr_block } module "subnets" { source = "git::https://github.com/cloudposse/terraform-aws-dynamic-subnets.git?ref=tags/0.3.6" availability_zones = ["${data.aws_availability_zones.available.names}"] - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" - attributes = "${var.attributes}" - tags = "${local.tags}" - region = "${var.region}" - vpc_id = "${module.vpc.vpc_id}" - igw_id = "${module.vpc.igw_id}" - cidr_block = "${module.vpc.vpc_cidr_block}" + namespace = var.namespace + stage = var.stage + name = var.name + attributes = var.attributes + tags = local.tags + region = var.region + vpc_id = module.vpc.vpc_id + igw_id = module.vpc.igw_id + cidr_block = module.vpc.vpc_cidr_block nat_gateway_enabled = "true" } module "eks_cluster" { source = "git::https://github.com/cloudposse/terraform-aws-eks-cluster.git?ref=tags/0.1.1" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" - attributes = "${var.attributes}" - tags = "${var.tags}" - vpc_id = "${module.vpc.vpc_id}" + namespace = var.namespace + stage = var.stage + name = var.name + attributes = var.attributes + tags = var.tags + vpc_id = module.vpc.vpc_id subnet_ids = ["${module.subnets.public_subnet_ids}"] allowed_security_groups = ["${distinct(compact(concat(var.allowed_security_groups_cluster, list(module.eks_workers.security_group_id))))}"] allowed_cidr_blocks = ["${var.allowed_cidr_blocks_cluster}"] - enabled = "${var.enabled}" + enabled = var.enabled } module "eks_workers" { source = 
"git::https://github.com/cloudposse/terraform-aws-eks-workers.git?ref=tags/0.1.1" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" - attributes = "${var.attributes}" - tags = "${var.tags}" - image_id = "${var.image_id}" - eks_worker_ami_name_filter = "${var.eks_worker_ami_name_filter}" - instance_type = "${var.instance_type}" - vpc_id = "${module.vpc.vpc_id}" + namespace = var.namespace + stage = var.stage + name = var.name + attributes = var.attributes + tags = var.tags + image_id = var.image_id + eks_worker_ami_name_filter = var.eks_worker_ami_name_filter + instance_type = var.instance_type + vpc_id = module.vpc.vpc_id subnet_ids = ["${module.subnets.public_subnet_ids}"] - health_check_type = "${var.health_check_type}" - min_size = "${var.min_size}" - max_size = "${var.max_size}" - wait_for_capacity_timeout = "${var.wait_for_capacity_timeout}" - associate_public_ip_address = "${var.associate_public_ip_address}" - cluster_name = "${module.eks_cluster.eks_cluster_id}" - cluster_endpoint = "${module.eks_cluster.eks_cluster_endpoint}" - cluster_certificate_authority_data = "${module.eks_cluster.eks_cluster_certificate_authority_data}" - cluster_security_group_id = "${module.eks_cluster.security_group_id}" + health_check_type = var.health_check_type + min_size = var.min_size + max_size = var.max_size + wait_for_capacity_timeout = var.wait_for_capacity_timeout + associate_public_ip_address = var.associate_public_ip_address + cluster_name = module.eks_cluster.eks_cluster_id + cluster_endpoint = module.eks_cluster.eks_cluster_endpoint + cluster_certificate_authority_data = module.eks_cluster.eks_cluster_certificate_authority_data + cluster_security_group_id = module.eks_cluster.security_group_id allowed_security_groups = ["${var.allowed_security_groups_workers}"] allowed_cidr_blocks = ["${var.allowed_cidr_blocks_workers}"] - enabled = "${var.enabled}" + enabled = var.enabled # Auto-scaling policies and CloudWatch metric alarms - autoscaling_policies_enabled = "${var.autoscaling_policies_enabled}" - cpu_utilization_high_threshold_percent = "${var.cpu_utilization_high_threshold_percent}" - cpu_utilization_low_threshold_percent = "${var.cpu_utilization_low_threshold_percent}" + autoscaling_policies_enabled = var.autoscaling_policies_enabled + cpu_utilization_high_threshold_percent = var.cpu_utilization_high_threshold_percent + cpu_utilization_low_threshold_percent = var.cpu_utilization_low_threshold_percent } diff --git a/deprecated/aws/eks/kubectl.tf b/deprecated/aws/eks/kubectl.tf index 34afe77e0..38e03c9c6 100644 --- a/deprecated/aws/eks/kubectl.tf +++ b/deprecated/aws/eks/kubectl.tf @@ -4,26 +4,26 @@ locals { } resource "local_file" "kubeconfig" { - count = "${var.enabled == "true" && var.apply_config_map_aws_auth == "true" ? 1 : 0}" - content = "${module.eks_cluster.kubeconfig}" - filename = "${local.kubeconfig_filename}" + count = var.enabled == "true" && var.apply_config_map_aws_auth == "true" ? 1 : 0 + content = module.eks_cluster.kubeconfig + filename = local.kubeconfig_filename } resource "local_file" "config_map_aws_auth" { - count = "${var.enabled == "true" && var.apply_config_map_aws_auth == "true" ? 1 : 0}" - content = "${module.eks_workers.config_map_aws_auth}" - filename = "${local.config_map_aws_auth_filename}" + count = var.enabled == "true" && var.apply_config_map_aws_auth == "true" ? 
1 : 0 + content = module.eks_workers.config_map_aws_auth + filename = local.config_map_aws_auth_filename } resource "null_resource" "apply_config_map_aws_auth" { - count = "${var.enabled == "true" && var.apply_config_map_aws_auth == "true" ? 1 : 0}" + count = var.enabled == "true" && var.apply_config_map_aws_auth == "true" ? 1 : 0 provisioner "local-exec" { command = "kubectl apply -f ${local.config_map_aws_auth_filename} --kubeconfig ${local.kubeconfig_filename}" } triggers { - kubeconfig_rendered = "${module.eks_cluster.kubeconfig}" - config_map_aws_auth_rendered = "${module.eks_workers.config_map_aws_auth}" + kubeconfig_rendered = module.eks_cluster.kubeconfig + config_map_aws_auth_rendered = module.eks_workers.config_map_aws_auth } } diff --git a/deprecated/aws/eks/main.tf b/deprecated/aws/eks/main.tf index ba39349f9..547a80599 100644 --- a/deprecated/aws/eks/main.tf +++ b/deprecated/aws/eks/main.tf @@ -5,11 +5,11 @@ terraform { } variable "aws_assume_role_arn" { - type = "string" + type = string } provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } diff --git a/deprecated/aws/eks/outputs.tf b/deprecated/aws/eks/outputs.tf index cb9c921dd..1f926824c 100644 --- a/deprecated/aws/eks/outputs.tf +++ b/deprecated/aws/eks/outputs.tf @@ -1,119 +1,119 @@ output "kubeconfig" { description = "`kubeconfig` configuration to connect to the cluster using `kubectl`. https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html#obtaining-kubectl-configuration-from-terraform" - value = "${module.eks_cluster.kubeconfig}" + value = module.eks_cluster.kubeconfig } output "config_map_aws_auth" { description = "Kubernetes ConfigMap configuration to allow the worker nodes to join the EKS cluster. 
https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html#required-kubernetes-configuration-to-join-worker-nodes" - value = "${module.eks_workers.config_map_aws_auth}" + value = module.eks_workers.config_map_aws_auth } output "eks_cluster_security_group_id" { description = "ID of the EKS cluster Security Group" - value = "${module.eks_cluster.security_group_id}" + value = module.eks_cluster.security_group_id } output "eks_cluster_security_group_arn" { description = "ARN of the EKS cluster Security Group" - value = "${module.eks_cluster.security_group_arn}" + value = module.eks_cluster.security_group_arn } output "eks_cluster_security_group_name" { description = "Name of the EKS cluster Security Group" - value = "${module.eks_cluster.security_group_name}" + value = module.eks_cluster.security_group_name } output "eks_cluster_id" { description = "The name of the cluster" - value = "${module.eks_cluster.eks_cluster_id}" + value = module.eks_cluster.eks_cluster_id } output "eks_cluster_arn" { description = "The Amazon Resource Name (ARN) of the cluster" - value = "${module.eks_cluster.eks_cluster_arn}" + value = module.eks_cluster.eks_cluster_arn } output "eks_cluster_certificate_authority_data" { description = "The base64 encoded certificate data required to communicate with the cluster" - value = "${module.eks_cluster.eks_cluster_certificate_authority_data}" + value = module.eks_cluster.eks_cluster_certificate_authority_data } output "eks_cluster_endpoint" { description = "The endpoint for the Kubernetes API server" - value = "${module.eks_cluster.eks_cluster_endpoint}" + value = module.eks_cluster.eks_cluster_endpoint } output "eks_cluster_version" { description = "The Kubernetes server version of the cluster" - value = "${module.eks_cluster.eks_cluster_version}" + value = module.eks_cluster.eks_cluster_version } output "workers_launch_template_id" { description = "ID of the launch template" - value = "${module.eks_workers.launch_template_id}" + value = module.eks_workers.launch_template_id } output "workers_launch_template_arn" { description = "ARN of the launch template" - value = "${module.eks_workers.launch_template_arn}" + value = module.eks_workers.launch_template_arn } output "workers_autoscaling_group_id" { description = "The AutoScaling Group ID" - value = "${module.eks_workers.autoscaling_group_id}" + value = module.eks_workers.autoscaling_group_id } output "workers_autoscaling_group_name" { description = "The AutoScaling Group name" - value = "${module.eks_workers.autoscaling_group_name}" + value = module.eks_workers.autoscaling_group_name } output "workers_autoscaling_group_arn" { description = "ARN of the AutoScaling Group" - value = "${module.eks_workers.autoscaling_group_arn}" + value = module.eks_workers.autoscaling_group_arn } output "workers_autoscaling_group_min_size" { description = "The minimum size of the AutoScaling Group" - value = "${module.eks_workers.autoscaling_group_min_size}" + value = module.eks_workers.autoscaling_group_min_size } output "workers_autoscaling_group_max_size" { description = "The maximum size of the AutoScaling Group" - value = "${module.eks_workers.autoscaling_group_max_size}" + value = module.eks_workers.autoscaling_group_max_size } output "workers_autoscaling_group_desired_capacity" { description = "The number of Amazon EC2 instances that should be running in the group" - value = "${module.eks_workers.autoscaling_group_desired_capacity}" + value = module.eks_workers.autoscaling_group_desired_capacity } output 
"workers_autoscaling_group_default_cooldown" { description = "Time between a scaling activity and the succeeding scaling activity" - value = "${module.eks_workers.autoscaling_group_default_cooldown}" + value = module.eks_workers.autoscaling_group_default_cooldown } output "workers_autoscaling_group_health_check_grace_period" { description = "Time after instance comes into service before checking health" - value = "${module.eks_workers.autoscaling_group_health_check_grace_period}" + value = module.eks_workers.autoscaling_group_health_check_grace_period } output "workers_autoscaling_group_health_check_type" { description = "`EC2` or `ELB`. Controls how health checking is done" - value = "${module.eks_workers.autoscaling_group_health_check_type}" + value = module.eks_workers.autoscaling_group_health_check_type } output "workers_security_group_id" { description = "ID of the worker nodes Security Group" - value = "${module.eks_workers.security_group_id}" + value = module.eks_workers.security_group_id } output "workers_security_group_arn" { description = "ARN of the worker nodes Security Group" - value = "${module.eks_workers.security_group_arn}" + value = module.eks_workers.security_group_arn } output "workers_security_group_name" { description = "Name of the worker nodes Security Group" - value = "${module.eks_workers.security_group_name}" + value = module.eks_workers.security_group_name } diff --git a/deprecated/aws/eks/variables.tf b/deprecated/aws/eks/variables.tf index 831b30d05..251ef26d1 100644 --- a/deprecated/aws/eks/variables.tf +++ b/deprecated/aws/eks/variables.tf @@ -1,98 +1,98 @@ variable "namespace" { - type = "string" + type = string description = "Namespace, which could be your organization name, e.g. 'eg' or 'cp'" } variable "stage" { - type = "string" + type = string description = "Stage, e.g. 'prod', 'staging', 'dev' or 'testing'" } variable "name" { - type = "string" + type = string default = "eks" description = "Solution name, e.g. 'app' or 'cluster'" } variable "delimiter" { - type = "string" + type = string default = "-" description = "Delimiter to be used between `name`, `namespace`, `stage`, etc." } variable "attributes" { - type = "list" + type = list(string) default = [] description = "Additional attributes (e.g. `1`)" } variable "tags" { - type = "map" + type = map(string) default = {} description = "Additional tags (e.g. `map('BusinessUnit`,`XYZ`)" } variable "enabled" { - type = "string" + type = string description = "Whether to create the resources. Set to `false` to prevent the module from creating any resources" default = "true" } variable "allowed_security_groups_cluster" { - type = "list" + type = list(string) default = [] description = "List of Security Group IDs to be allowed to connect to the EKS cluster" } variable "allowed_security_groups_workers" { - type = "list" + type = list(string) default = [] description = "List of Security Group IDs to be allowed to connect to the worker nodes" } variable "allowed_cidr_blocks_cluster" { - type = "list" + type = list(string) default = [] description = "List of CIDR blocks to be allowed to connect to the EKS cluster" } variable "allowed_cidr_blocks_workers" { - type = "list" + type = list(string) default = [] description = "List of CIDR blocks to be allowed to connect to the worker nodes" } variable "region" { - type = "string" + type = string description = "AWS Region" } variable "vpc_cidr_block" { - type = "string" + type = string default = "172.30.0.0/16" description = "VPC CIDR block. 
See https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html for more details" } variable "image_id" { - type = "string" + type = string default = "" description = "EC2 image ID to launch. If not provided, the module will lookup the most recent EKS AMI. See https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html for more details on EKS-optimized images" } variable "eks_worker_ami_name_filter" { - type = "string" + type = string description = "AMI name filter to lookup the most recent EKS AMI if `image_id` is not provided" default = "amazon-eks-node-v*" } variable "instance_type" { - type = "string" + type = string default = "t2.medium" description = "Instance type to launch" } variable "health_check_type" { - type = "string" + type = string description = "Controls how health checking is done. Valid values are `EC2` or `ELB`" default = "EC2" } @@ -108,7 +108,7 @@ variable "min_size" { } variable "wait_for_capacity_timeout" { - type = "string" + type = string description = "A maximum duration that Terraform should wait for ASG instances to be healthy before timing out. Setting this to '0' causes Terraform to skip all Capacity Waiting behavior" default = "10m" } @@ -119,25 +119,25 @@ variable "associate_public_ip_address" { } variable "autoscaling_policies_enabled" { - type = "string" + type = string default = "true" description = "Whether to create `aws_autoscaling_policy` and `aws_cloudwatch_metric_alarm` resources to control Auto Scaling" } variable "cpu_utilization_high_threshold_percent" { - type = "string" + type = string default = "80" description = "Worker nodes AutoScaling Group CPU utilization high threshold percent" } variable "cpu_utilization_low_threshold_percent" { - type = "string" + type = string default = "20" description = "Worker nodes AutoScaling Group CPU utilization low threshold percent" } variable "apply_config_map_aws_auth" { - type = "string" + type = string default = "true" description = "Whether to generate local files from `kubeconfig` and `config_map_aws_auth` and perform `kubectl apply` to apply the ConfigMap to allow the worker nodes to join the EKS cluster" } diff --git a/deprecated/aws/grafana-backing-services/README.md b/deprecated/aws/grafana-backing-services/README.md index 02b7e4707..e72d7f22d 100644 --- a/deprecated/aws/grafana-backing-services/README.md +++ b/deprecated/aws/grafana-backing-services/README.md @@ -23,7 +23,7 @@ access to all the cluster's resources through Kubernetes. ### SSL Server Certificate Validation -To get the Aurora MySQL SSL connection to validate: +To get the Aurora MySQL SSL connection to validate: 1. Get the RDS CA from https://s3.amazonaws.com/rds-downloads/rds-combined-ca-bundle.pem (expires Mar 5 09:11:31 2020 GMT) or successor (consult current RDS documentation) 2. Save it in a `ConfigMap` diff --git a/deprecated/aws/grafana-backing-services/aurora-mysql.tf b/deprecated/aws/grafana-backing-services/aurora-mysql.tf index 049d676d2..dd18c3096 100644 --- a/deprecated/aws/grafana-backing-services/aurora-mysql.tf +++ b/deprecated/aws/grafana-backing-services/aurora-mysql.tf @@ -87,7 +87,7 @@ resource "random_string" "mysql_admin_password" { # "Read SSM parameter to get allowed CIDR blocks" data "aws_ssm_parameter" "allowed_cidr_blocks" { # The data source will throw an error if it cannot find the parameter, - # so do not reference it unless it is neeeded. + # so do not reference it unless it is needed. count = local.allowed_cidr_blocks_use_ssm ? 
1 : 0 # name = substr(mysql_cluster_allowed_cidr_blocks, 0, 1) == "/" ? mysql_cluster_allowed_cidr_blocks : "/aws/service/global-infrastructure/version" @@ -97,7 +97,7 @@ data "aws_ssm_parameter" "allowed_cidr_blocks" { # "Read SSM parameter to get allowed VPC ID" data "aws_ssm_parameter" "vpc_id" { # The data source will throw an error if it cannot find the parameter, - # so do not reference it unless it is neeeded. + # so do not reference it unless it is needed. count = local.vpc_id_use_ssm ? 1 : 0 name = var.vpc_id } @@ -105,7 +105,7 @@ data "aws_ssm_parameter" "vpc_id" { # "Read SSM parameter to get allowed VPC subnet IDs" data "aws_ssm_parameter" "vpc_subnet_ids" { # The data source will throw an error if it cannot find the parameter, - # so do not reference it unless it is neeeded. + # so do not reference it unless it is needed. count = local.vpc_subnet_ids_use_ssm ? 1 : 0 name = var.vpc_subnet_ids } diff --git a/deprecated/aws/iam/audit.tf b/deprecated/aws/iam/audit.tf index f3791f7b6..0045f9872 100644 --- a/deprecated/aws/iam/audit.tf +++ b/deprecated/aws/iam/audit.tf @@ -1,5 +1,5 @@ variable "audit_account_user_names" { - type = "list" + type = list(string) description = "IAM user names to grant access to the `audit` account" default = [] } @@ -7,18 +7,18 @@ variable "audit_account_user_names" { # Provision group access to audit account. Careful! Very few people, if any should have access to this account. module "organization_access_group_audit" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.accounts_enabled, "audit") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.accounts_enabled, "audit") == true ? "true" : "false" + namespace = var.namespace stage = "audit" name = "admin" - user_names = "${var.audit_account_user_names}" - member_account_id = "${data.terraform_remote_state.accounts.audit_account_id}" + user_names = var.audit_account_user_names + member_account_id = data.terraform_remote_state.accounts.audit_account_id require_mfa = "true" } module "organization_access_group_ssm_audit" { source = "git::https://github.com/cloudposse/terraform-aws-ssm-parameter-store?ref=tags/0.1.5" - enabled = "${contains(var.accounts_enabled, "audit") == true ? "true" : "false"}" + enabled = contains(var.accounts_enabled, "audit") == true ? "true" : "false" parameter_write = [ { @@ -33,5 +33,5 @@ module "organization_access_group_ssm_audit" { output "audit_switchrole_url" { description = "URL to the IAM console to switch to the audit account organization access role" - value = "${module.organization_access_group_audit.switchrole_url}" + value = module.organization_access_group_audit.switchrole_url } diff --git a/deprecated/aws/iam/corp.tf b/deprecated/aws/iam/corp.tf index de4142758..5c0f0417c 100644 --- a/deprecated/aws/iam/corp.tf +++ b/deprecated/aws/iam/corp.tf @@ -1,5 +1,5 @@ variable "corp_account_user_names" { - type = "list" + type = list(string) description = "IAM user names to grant access to the `corp` account" default = [] } @@ -7,18 +7,18 @@ variable "corp_account_user_names" { # Provision group access to corp account module "organization_access_group_corp" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.accounts_enabled, "corp") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.accounts_enabled, "corp") == true ? 
"true" : "false" + namespace = var.namespace stage = "corp" name = "admin" - user_names = "${var.corp_account_user_names}" - member_account_id = "${data.terraform_remote_state.accounts.corp_account_id}" + user_names = var.corp_account_user_names + member_account_id = data.terraform_remote_state.accounts.corp_account_id require_mfa = "true" } module "organization_access_group_ssm_corp" { source = "git::https://github.com/cloudposse/terraform-aws-ssm-parameter-store?ref=tags/0.1.5" - enabled = "${contains(var.accounts_enabled, "corp") == true ? "true" : "false"}" + enabled = contains(var.accounts_enabled, "corp") == true ? "true" : "false" parameter_write = [ { @@ -33,5 +33,5 @@ module "organization_access_group_ssm_corp" { output "corp_switchrole_url" { description = "URL to the IAM console to switch to the corp account organization access role" - value = "${module.organization_access_group_corp.switchrole_url}" + value = module.organization_access_group_corp.switchrole_url } diff --git a/deprecated/aws/iam/data.tf b/deprecated/aws/iam/data.tf index 5525b646a..43150f102 100644 --- a/deprecated/aws/iam/data.tf +++ b/deprecated/aws/iam/data.tf @@ -1,5 +1,5 @@ variable "data_account_user_names" { - type = "list" + type = list(string) description = "IAM user names to grant access to the `data` account" default = [] } @@ -7,18 +7,18 @@ variable "data_account_user_names" { # Provision group access to data account module "organization_access_group_data" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.accounts_enabled, "data") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.accounts_enabled, "data") == true ? "true" : "false" + namespace = var.namespace stage = "data" name = "admin" - user_names = "${var.data_account_user_names}" - member_account_id = "${data.terraform_remote_state.accounts.data_account_id}" + user_names = var.data_account_user_names + member_account_id = data.terraform_remote_state.accounts.data_account_id require_mfa = "true" } module "organization_access_group_ssm_data" { source = "git::https://github.com/cloudposse/terraform-aws-ssm-parameter-store?ref=tags/0.1.5" - enabled = "${contains(var.accounts_enabled, "data") == true ? "true" : "false"}" + enabled = contains(var.accounts_enabled, "data") == true ? "true" : "false" parameter_write = [ { @@ -33,5 +33,5 @@ module "organization_access_group_ssm_data" { output "data_switchrole_url" { description = "URL to the IAM console to switch to the data account organization access role" - value = "${module.organization_access_group_data.switchrole_url}" + value = module.organization_access_group_data.switchrole_url } diff --git a/deprecated/aws/iam/dev.tf b/deprecated/aws/iam/dev.tf index 5cc223e15..2d4aff902 100644 --- a/deprecated/aws/iam/dev.tf +++ b/deprecated/aws/iam/dev.tf @@ -1,5 +1,5 @@ variable "dev_account_user_names" { - type = "list" + type = list(string) description = "IAM user names to grant access to the `dev` account" default = [] } @@ -7,18 +7,18 @@ variable "dev_account_user_names" { # Provision group access to dev account module "organization_access_group_dev" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.accounts_enabled, "dev") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.accounts_enabled, "dev") == true ? 
"true" : "false" + namespace = var.namespace stage = "dev" name = "admin" - user_names = "${var.dev_account_user_names}" - member_account_id = "${data.terraform_remote_state.accounts.dev_account_id}" + user_names = var.dev_account_user_names + member_account_id = data.terraform_remote_state.accounts.dev_account_id require_mfa = "true" } module "organization_access_group_ssm_dev" { source = "git::https://github.com/cloudposse/terraform-aws-ssm-parameter-store?ref=tags/0.1.5" - enabled = "${contains(var.accounts_enabled, "dev") == true ? "true" : "false"}" + enabled = contains(var.accounts_enabled, "dev") == true ? "true" : "false" parameter_write = [ { @@ -33,5 +33,5 @@ module "organization_access_group_ssm_dev" { output "dev_switchrole_url" { description = "URL to the IAM console to switch to the dev account organization access role" - value = "${module.organization_access_group_dev.switchrole_url}" + value = module.organization_access_group_dev.switchrole_url } diff --git a/deprecated/aws/iam/identity.tf b/deprecated/aws/iam/identity.tf index 49e91853c..fca252d46 100644 --- a/deprecated/aws/iam/identity.tf +++ b/deprecated/aws/iam/identity.tf @@ -1,5 +1,5 @@ variable "identity_account_user_names" { - type = "list" + type = list(string) description = "IAM user names to grant access to the `identity` account" default = [] } @@ -7,18 +7,18 @@ variable "identity_account_user_names" { # Provision group access to identity account module "organization_access_group_identity" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.accounts_enabled, "identity") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.accounts_enabled, "identity") == true ? "true" : "false" + namespace = var.namespace stage = "identity" name = "admin" - user_names = "${var.identity_account_user_names}" - member_account_id = "${data.terraform_remote_state.accounts.identity_account_id}" + user_names = var.identity_account_user_names + member_account_id = data.terraform_remote_state.accounts.identity_account_id require_mfa = "true" } module "organization_access_group_ssm_identity" { source = "git::https://github.com/cloudposse/terraform-aws-ssm-parameter-store?ref=tags/0.1.5" - enabled = "${contains(var.accounts_enabled, "identity") == true ? "true" : "false"}" + enabled = contains(var.accounts_enabled, "identity") == true ? 
"true" : "false" parameter_write = [ { @@ -33,5 +33,5 @@ module "organization_access_group_ssm_identity" { output "identity_switchrole_url" { description = "URL to the IAM console to switch to the identity account organization access role" - value = "${module.organization_access_group_identity.switchrole_url}" + value = module.organization_access_group_identity.switchrole_url } diff --git a/deprecated/aws/iam/main.tf b/deprecated/aws/iam/main.tf index 6f8d5bd96..cdf6f584c 100644 --- a/deprecated/aws/iam/main.tf +++ b/deprecated/aws/iam/main.tf @@ -6,7 +6,7 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } diff --git a/deprecated/aws/iam/prod.tf b/deprecated/aws/iam/prod.tf index 2777c1b5f..5021d3f8e 100644 --- a/deprecated/aws/iam/prod.tf +++ b/deprecated/aws/iam/prod.tf @@ -1,5 +1,5 @@ variable "prod_account_user_names" { - type = "list" + type = list(string) description = "IAM user names to grant access to the `prod` account" default = [] } @@ -7,18 +7,18 @@ variable "prod_account_user_names" { # Provision group access to production account module "organization_access_group_prod" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.accounts_enabled, "prod") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.accounts_enabled, "prod") == true ? "true" : "false" + namespace = var.namespace stage = "prod" name = "admin" - user_names = "${var.prod_account_user_names}" - member_account_id = "${data.terraform_remote_state.accounts.prod_account_id}" + user_names = var.prod_account_user_names + member_account_id = data.terraform_remote_state.accounts.prod_account_id require_mfa = "true" } module "organization_access_group_ssm_prod" { source = "git::https://github.com/cloudposse/terraform-aws-ssm-parameter-store?ref=tags/0.1.5" - enabled = "${contains(var.accounts_enabled, "prod") == true ? "true" : "false"}" + enabled = contains(var.accounts_enabled, "prod") == true ? "true" : "false" parameter_write = [ { @@ -33,5 +33,5 @@ module "organization_access_group_ssm_prod" { output "prod_switchrole_url" { description = "URL to the IAM console to switch to the prod account organization access role" - value = "${module.organization_access_group_prod.switchrole_url}" + value = module.organization_access_group_prod.switchrole_url } diff --git a/deprecated/aws/iam/security.tf b/deprecated/aws/iam/security.tf index 5b5374773..9a71504ac 100644 --- a/deprecated/aws/iam/security.tf +++ b/deprecated/aws/iam/security.tf @@ -1,5 +1,5 @@ variable "security_account_user_names" { - type = "list" + type = list(string) description = "IAM user names to grant access to the `security` account" default = [] } @@ -7,18 +7,18 @@ variable "security_account_user_names" { # Provision group access to security account module "organization_access_group_security" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.accounts_enabled, "security") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.accounts_enabled, "security") == true ? 
"true" : "false" + namespace = var.namespace stage = "security" name = "admin" - user_names = "${var.security_account_user_names}" - member_account_id = "${data.terraform_remote_state.accounts.security_account_id}" + user_names = var.security_account_user_names + member_account_id = data.terraform_remote_state.accounts.security_account_id require_mfa = "true" } module "organization_access_group_ssm_security" { source = "git::https://github.com/cloudposse/terraform-aws-ssm-parameter-store?ref=tags/0.1.5" - enabled = "${contains(var.accounts_enabled, "security") == true ? "true" : "false"}" + enabled = contains(var.accounts_enabled, "security") == true ? "true" : "false" parameter_write = [ { @@ -33,5 +33,5 @@ module "organization_access_group_ssm_security" { output "security_switchrole_url" { description = "URL to the IAM console to switch to the security account organization access role" - value = "${module.organization_access_group_security.switchrole_url}" + value = module.organization_access_group_security.switchrole_url } diff --git a/deprecated/aws/iam/staging.tf b/deprecated/aws/iam/staging.tf index f9e2f87c1..0b4963760 100644 --- a/deprecated/aws/iam/staging.tf +++ b/deprecated/aws/iam/staging.tf @@ -1,5 +1,5 @@ variable "staging_account_user_names" { - type = "list" + type = list(string) description = "IAM user names to grant access to the `staging` account" default = [] } @@ -7,18 +7,18 @@ variable "staging_account_user_names" { # Provision group access to staging account module "organization_access_group_staging" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.accounts_enabled, "staging") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.accounts_enabled, "staging") == true ? "true" : "false" + namespace = var.namespace stage = "staging" name = "admin" - user_names = "${var.staging_account_user_names}" - member_account_id = "${data.terraform_remote_state.accounts.staging_account_id}" + user_names = var.staging_account_user_names + member_account_id = data.terraform_remote_state.accounts.staging_account_id require_mfa = "true" } module "organization_access_group_ssm_staging" { source = "git::https://github.com/cloudposse/terraform-aws-ssm-parameter-store?ref=tags/0.1.5" - enabled = "${contains(var.accounts_enabled, "staging") == true ? "true" : "false"}" + enabled = contains(var.accounts_enabled, "staging") == true ? "true" : "false" parameter_write = [ { @@ -33,5 +33,5 @@ module "organization_access_group_ssm_staging" { output "staging_switchrole_url" { description = "URL to the IAM console to switch to the staging account organization access role" - value = "${module.organization_access_group_staging.switchrole_url}" + value = module.organization_access_group_staging.switchrole_url } diff --git a/deprecated/aws/iam/testing.tf b/deprecated/aws/iam/testing.tf index d19ec7a03..f568994c0 100644 --- a/deprecated/aws/iam/testing.tf +++ b/deprecated/aws/iam/testing.tf @@ -1,5 +1,5 @@ variable "testing_account_user_names" { - type = "list" + type = list(string) description = "IAM user names to grant access to the `testing` account" default = [] } @@ -7,18 +7,18 @@ variable "testing_account_user_names" { # Provision group access to testing account module "organization_access_group_testing" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.accounts_enabled, "testing") == true ? 
"true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.accounts_enabled, "testing") == true ? "true" : "false" + namespace = var.namespace stage = "testing" name = "admin" - user_names = "${var.testing_account_user_names}" - member_account_id = "${data.terraform_remote_state.accounts.testing_account_id}" + user_names = var.testing_account_user_names + member_account_id = data.terraform_remote_state.accounts.testing_account_id require_mfa = "true" } module "organization_access_group_ssm_testing" { source = "git::https://github.com/cloudposse/terraform-aws-ssm-parameter-store?ref=tags/0.1.5" - enabled = "${contains(var.accounts_enabled, "testing") == true ? "true" : "false"}" + enabled = contains(var.accounts_enabled, "testing") == true ? "true" : "false" parameter_write = [ { @@ -33,5 +33,5 @@ module "organization_access_group_ssm_testing" { output "testing_switchrole_url" { description = "URL to the IAM console to switch to the testing account organization access role" - value = "${module.organization_access_group_testing.switchrole_url}" + value = module.organization_access_group_testing.switchrole_url } diff --git a/deprecated/aws/iam/variables.tf b/deprecated/aws/iam/variables.tf index bb2b1ca23..c88f445aa 100644 --- a/deprecated/aws/iam/variables.tf +++ b/deprecated/aws/iam/variables.tf @@ -1,19 +1,19 @@ variable "accounts_enabled" { - type = "list" + type = list(string) description = "Accounts to enable" default = ["dev", "staging", "prod", "testing", "audit"] } variable "aws_assume_role_arn" { - type = "string" + type = string } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage, e.g. 'prod', 'staging', 'dev', or 'test'" } diff --git a/deprecated/aws/keycloak-backing-services/README.md b/deprecated/aws/keycloak-backing-services/README.md index 1fe240acd..3a851b29f 100644 --- a/deprecated/aws/keycloak-backing-services/README.md +++ b/deprecated/aws/keycloak-backing-services/README.md @@ -8,7 +8,7 @@ As of this writing, this only provisions an Aurora MySQL 5.7 database. ### Database encryption -This module, as of this writing, provisions a database that is **not** encrypted. +This module, as of this writing, provisions a database that is **not** encrypted. This means that database backups/snapshots are also unencrypted. The database, and of course the backups, contain secrets that an attacker could use to gain access to anything protected by Keycloak. @@ -22,7 +22,7 @@ key would also be available to someone with the right IAM credentials. As a practical matter, anyone with access to the backups will likely also have access to the encryption key via KMS, or be able to access the database directly after getting the user and password from SSM, or be able to -execute commands in the Keycloak pod/container that expose the secrets. +execute commands in the Keycloak pod/container that expose the secrets. ### SSL Server Certificate Validation @@ -43,29 +43,29 @@ an authorized local service. To keep the database encrypted, this module will have to be extended: 1 Create a KMS key for encrypting the database. Using the RDS default key is not advisable since the only practical advantage of the key comes from -limiting access to it, and the default key will likey have relatively +limiting access to it, and the default key will likely have relatively wide access. 1. Create an IAM role for Keycloak that has access to the key. 
Nodes running `kiam-server` will need to be able to assume this role. 2. Enable encryption for the database using this key. -Then the Keycloak deployment (actually `StatefulSet`) will need to be -annotated so that `kiam` grants Keycloak access to this role. +Then the Keycloak deployment (actually `StatefulSet`) will need to be +annotated so that `kiam` grants Keycloak access to this role. ### SSL Server Certificate Validation -To get the RDS MySQL SSL connection to validate: +To get the RDS MySQL SSL connection to validate: 1. Get the RDS CA from https://s3.amazonaws.com/rds-downloads/rds-ca-2015-root.pem expires (Mar 5 09:11:31 2020 GMT) or successor (consult current RDS documentation) -2. Import it into a Java KeyStore (JKS) +2. Import it into a Java KeyStore (JKS) * Run`keytool -importcert -alias MySQLCACert -file ca.pem -keystore truststore -storepass mypassword` in a Keycloak container in order to be sure to get a compatible version of the Java SDK `keytool` 3. Copy the KeyStore into a secret 4. Mount the Secret 5. Set [`JDBC_PARAMS` environment variable](https://github.com/jboss-dockerfiles/keycloak/blob/119fb1f61a477ec217ba71c18c3a71a10e8d5575/server/tools/cli/databases/mysql/change-database.cli#L2 ) to `?clientCertificateKeyStoreUrl=file:///path-to-keystore&clientCertificateKeyStorePassword=mypassword` -6. Note that it would seem to be more appropriate to set to +6. Note that it would seem to be more appropriate to set to `?trustCertificateKeyStoreUrl=file:///path-to-keystore&trustCertificateKeyStorePassword=mypassword` - but the [documentation](https://dev.mysql.com/doc/connector-j/5.1/en/connector-j-reference-using-ssl.html) + but the [documentation](https://dev.mysql.com/doc/connector-j/5.1/en/connector-j-reference-using-ssl.html) [consistently](https://dev.mysql.com/doc/connector-j/5.1/en/connector-j-reference-configuration-properties.html) says to use the `clientCertificate*` stuff for verifying the server. diff --git a/deprecated/aws/keycloak-backing-services/aurora-mysql.tf b/deprecated/aws/keycloak-backing-services/aurora-mysql.tf index bf87e1f9a..c420bea54 100644 --- a/deprecated/aws/keycloak-backing-services/aurora-mysql.tf +++ b/deprecated/aws/keycloak-backing-services/aurora-mysql.tf @@ -1,44 +1,44 @@ # https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html variable "mysql_name" { - type = "string" + type = string description = "Name of the application, e.g. 
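The keycloak-backing-services README above describes extending the module with a dedicated KMS key and enabling storage encryption. A minimal, hypothetical sketch of that extension, using the module's existing `mysql_storage_encrypted` and `mysql_kms_key_id` inputs (declared below in `aurora-mysql.tf`); the key name, alias, and settings are illustrative only:

```hcl
# Hypothetical dedicated key for the Keycloak database (step 1 of the list above).
resource "aws_kms_key" "keycloak_db" {
  description             = "Encryption key for the Keycloak backing database"
  deletion_window_in_days = 30
  enable_key_rotation     = true
}

resource "aws_kms_alias" "keycloak_db" {
  name          = "alias/keycloak-db"
  target_key_id = aws_kms_key.keycloak_db.key_id
}

# The inputs needed to enable encryption already exist in this module:
#   mysql_storage_encrypted = "true"
#   mysql_kms_key_id        = aws_kms_key.keycloak_db.arn
# The IAM role for Keycloak with access to this key (assumed via kiam), described
# in the README above, is not shown here.
```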
`app` or `analytics`" default = "mysql" } variable "mysql_admin_user" { - type = "string" + type = string description = "MySQL admin user name" default = "" } variable "mysql_admin_password" { - type = "string" + type = string description = "MySQL password for the admin user" default = "" } variable "mysql_db_name" { - type = "string" + type = string description = "MySQL database name" default = "" } # https://aws.amazon.com/rds/aurora/pricing variable "mysql_instance_type" { - type = "string" + type = string default = "db.t3.small" description = "EC2 instance type for Aurora MySQL cluster" } variable "mysql_cluster_size" { - type = "string" + type = string default = "2" description = "MySQL cluster size" } variable "mysql_cluster_enabled" { - type = "string" + type = string default = "false" description = "Set to false to prevent the module from creating any resources" } @@ -49,108 +49,108 @@ variable "mysql_cluster_publicly_accessible" { } variable "mysql_cluster_allowed_cidr_blocks" { - type = "string" + type = string default = "0.0.0.0/0" description = "Comma separated string list of CIDR blocks allowed to access the cluster, or SSM parameter key for it" } variable "mysql_storage_encrypted" { - type = "string" + type = string default = "false" description = "Set to true to keep the database contents encrypted" } variable "mysql_kms_key_id" { - type = "string" + type = string default = "alias/aws/rds" description = "KMS key ID, ARN, or alias to use for encrypting MySQL database" } variable "mysql_deletion_protection" { - type = "string" + type = string default = "true" description = "Set to true to protect the database from deletion" } variable "mysql_skip_final_snapshot" { - type = "string" + type = string default = "false" description = "Determines whether a final DB snapshot is created before the DB cluster is deleted" } variable "vpc_id" { - type = "string" + type = string description = "The AWS ID of the VPC to create the cluster in, or SSM parameter key for it" } variable "vpc_subnet_ids" { - type = "string" + type = string description = "Comma separated string list of AWS Subnet IDs in which to place the database, or SSM parameter key for it" } resource "random_pet" "mysql_db_name" { - count = "${local.mysql_cluster_enabled && length(var.mysql_db_name) == 0 ? 1 : 0}" + count = local.mysql_cluster_enabled && length(var.mysql_db_name) == 0 ? 1 : 0 separator = "_" } resource "random_string" "mysql_admin_user" { - count = "${local.mysql_cluster_enabled && length(var.mysql_admin_user) == 0 ? 1 : 0}" + count = local.mysql_cluster_enabled && length(var.mysql_admin_user) == 0 ? 1 : 0 length = 8 number = false special = false } resource "random_string" "mysql_admin_password" { - count = "${local.mysql_cluster_enabled && length(var.mysql_admin_password) == 0 ? 1 : 0}" + count = local.mysql_cluster_enabled && length(var.mysql_admin_password) == 0 ? 1 : 0 length = 33 special = false } # "Read SSM parameter to get allowed CIDR blocks" data "aws_ssm_parameter" "allowed_cidr_blocks" { - count = "${local.allowed_cidr_blocks_use_ssm ? 1 : 0}" + count = local.allowed_cidr_blocks_use_ssm ? 1 : 0 # The data source will throw an error if it cannot find the parameter, # name = "${substr(mysql_cluster_allowed_cidr_blocks, 0, 1) == "/" ? 
mysql_cluster_allowed_cidr_blocks : "/aws/service/global-infrastructure/version"}" - name = "${var.mysql_cluster_allowed_cidr_blocks}" + name = var.mysql_cluster_allowed_cidr_blocks } # "Read SSM parameter to get allowed VPC ID" data "aws_ssm_parameter" "vpc_id" { - count = "${local.vpc_id_use_ssm ? 1 : 0}" + count = local.vpc_id_use_ssm ? 1 : 0 # The data source will throw an error if it cannot find the parameter, # name = "${substr(mysql_cluster_allowed_cidr_blocks, 0, 1) == "/" ? mysql_cluster_allowed_cidr_blocks : "/aws/service/global-infrastructure/version"}" - name = "${var.vpc_id}" + name = var.vpc_id } # "Read SSM parameter to get allowed VPC subnet IDs" data "aws_ssm_parameter" "vpc_subnet_ids" { - count = "${local.vpc_subnet_ids_use_ssm ? 1 : 0}" + count = local.vpc_subnet_ids_use_ssm ? 1 : 0 # The data source will throw an error if it cannot find the parameter, # name = "${substr(mysql_cluster_allowed_cidr_blocks, 0, 1) == "/" ? mysql_cluster_allowed_cidr_blocks : "/aws/service/global-infrastructure/version"}" - name = "${var.vpc_subnet_ids}" + name = var.vpc_subnet_ids } locals { - mysql_cluster_enabled = "${var.mysql_cluster_enabled == "true"}" - mysql_admin_user = "${length(var.mysql_admin_user) > 0 ? var.mysql_admin_user : join("", random_string.mysql_admin_user.*.result)}" - mysql_admin_password = "${length(var.mysql_admin_password) > 0 ? var.mysql_admin_password : join("", random_string.mysql_admin_password.*.result)}" - mysql_db_name = "${length(var.mysql_db_name) > 0 ? var.mysql_db_name : join("", random_pet.mysql_db_name.*.id)}" + mysql_cluster_enabled = var.mysql_cluster_enabled == "true" + mysql_admin_user = length(var.mysql_admin_user) > 0 ? var.mysql_admin_user : join("", random_string.mysql_admin_user.*.result) + mysql_admin_password = length(var.mysql_admin_password) > 0 ? var.mysql_admin_password : join("", random_string.mysql_admin_password.*.result) + mysql_db_name = length(var.mysql_db_name) > 0 ? var.mysql_db_name : join("", random_pet.mysql_db_name.*.id) - allowed_cidr_blocks_use_ssm = "${substr(var.mysql_cluster_allowed_cidr_blocks, 0, 1) == "/" && local.mysql_cluster_enabled}" - vpc_id_use_ssm = "${substr(var.vpc_id, 0, 1) == "/" && local.mysql_cluster_enabled}" - vpc_subnet_ids_use_ssm = "${substr(var.vpc_subnet_ids, 0, 1) == "/" && local.mysql_cluster_enabled}" + allowed_cidr_blocks_use_ssm = substr(var.mysql_cluster_allowed_cidr_blocks, 0, 1) == "/" && local.mysql_cluster_enabled + vpc_id_use_ssm = substr(var.vpc_id, 0, 1) == "/" && local.mysql_cluster_enabled + vpc_subnet_ids_use_ssm = substr(var.vpc_subnet_ids, 0, 1) == "/" && local.mysql_cluster_enabled - allowed_cidr_blocks_string = "${local.allowed_cidr_blocks_use_ssm ? join("", data.aws_ssm_parameter.allowed_cidr_blocks.*.value) : var.mysql_cluster_allowed_cidr_blocks}" - vpc_subnet_ids_string = "${local.vpc_subnet_ids_use_ssm ? join("", data.aws_ssm_parameter.vpc_subnet_ids.*.value) : var.vpc_subnet_ids}" + allowed_cidr_blocks_string = local.allowed_cidr_blocks_use_ssm ? join("", data.aws_ssm_parameter.allowed_cidr_blocks.*.value) : var.mysql_cluster_allowed_cidr_blocks + vpc_subnet_ids_string = local.vpc_subnet_ids_use_ssm ? join("", data.aws_ssm_parameter.vpc_subnet_ids.*.value) : var.vpc_subnet_ids allowed_cidr_blocks = [ "${split(",", local.allowed_cidr_blocks_string)}", ] - vpc_id = "${local.vpc_id_use_ssm ? join("", data.aws_ssm_parameter.vpc_id.*.value) : var.vpc_id}" + vpc_id = local.vpc_id_use_ssm ? 
join("", data.aws_ssm_parameter.vpc_id.*.value) : var.vpc_id vpc_subnet_ids = [ "${split(",", local.vpc_subnet_ids_string)}", @@ -158,76 +158,76 @@ locals { } data "aws_kms_key" "mysql" { - key_id = "${var.mysql_kms_key_id}" + key_id = var.mysql_kms_key_id } module "aurora_mysql" { source = "git::https://github.com/cloudposse/terraform-aws-rds-cluster.git?ref=tags/0.15.0" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.mysql_name}" + namespace = var.namespace + stage = var.stage + name = var.mysql_name attributes = ["keycloak"] engine = "aurora-mysql" cluster_family = "aurora-mysql5.7" - instance_type = "${var.mysql_instance_type}" - cluster_size = "${var.mysql_cluster_size}" - admin_user = "${local.mysql_admin_user}" - admin_password = "${local.mysql_admin_password}" - db_name = "${local.mysql_db_name}" + instance_type = var.mysql_instance_type + cluster_size = var.mysql_cluster_size + admin_user = local.mysql_admin_user + admin_password = local.mysql_admin_password + db_name = local.mysql_db_name db_port = "3306" - vpc_id = "${local.vpc_id}" - subnets = "${local.vpc_subnet_ids}" - zone_id = "${local.dns_zone_id}" - enabled = "${var.mysql_cluster_enabled}" + vpc_id = local.vpc_id + subnets = local.vpc_subnet_ids + zone_id = local.dns_zone_id + enabled = var.mysql_cluster_enabled - storage_encrypted = "${var.mysql_storage_encrypted}" - kms_key_arn = "${var.mysql_storage_encrypted ? data.aws_kms_key.mysql.arn : ""}" - deletion_protection = "${var.mysql_deletion_protection}" - skip_final_snapshot = "${var.mysql_skip_final_snapshot}" - publicly_accessible = "${var.mysql_cluster_publicly_accessible}" - allowed_cidr_blocks = "${local.allowed_cidr_blocks}" + storage_encrypted = var.mysql_storage_encrypted + kms_key_arn = var.mysql_storage_encrypted ? data.aws_kms_key.mysql.arn : "" + deletion_protection = var.mysql_deletion_protection + skip_final_snapshot = var.mysql_skip_final_snapshot + publicly_accessible = var.mysql_cluster_publicly_accessible + allowed_cidr_blocks = local.allowed_cidr_blocks } resource "aws_ssm_parameter" "aurora_mysql_database_name" { - count = "${local.mysql_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name_pattern, local.chamber_service, "keycloak_db_name")}" - value = "${module.aurora_mysql.name}" + count = local.mysql_cluster_enabled ? 1 : 0 + name = format(var.chamber_parameter_name_pattern, local.chamber_service, "keycloak_db_name") + value = module.aurora_mysql.name description = "Aurora MySQL Database Name" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "aurora_mysql_master_username" { - count = "${local.mysql_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name_pattern, local.chamber_service, "keycloak_db_user")}" - value = "${module.aurora_mysql.user}" + count = local.mysql_cluster_enabled ? 1 : 0 + name = format(var.chamber_parameter_name_pattern, local.chamber_service, "keycloak_db_user") + value = module.aurora_mysql.user description = "Aurora MySQL Username for the master DB user" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "aurora_mysql_master_password" { - count = "${local.mysql_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name_pattern, local.chamber_service, "keycloak_db_password")}" - value = "${local.mysql_admin_password}" + count = local.mysql_cluster_enabled ? 
1 : 0 + name = format(var.chamber_parameter_name_pattern, local.chamber_service, "keycloak_db_password") + value = local.mysql_admin_password description = "Aurora MySQL Password for the master DB user" type = "SecureString" overwrite = "true" - key_id = "${var.chamber_kms_key_id}" + key_id = var.chamber_kms_key_id } resource "aws_ssm_parameter" "aurora_mysql_master_hostname" { - count = "${local.mysql_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name_pattern, local.chamber_service, "keycloak_db_host")}" - value = "${module.aurora_mysql.master_host}" + count = local.mysql_cluster_enabled ? 1 : 0 + name = format(var.chamber_parameter_name_pattern, local.chamber_service, "keycloak_db_host") + value = module.aurora_mysql.master_host description = "Aurora MySQL DB Master hostname" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "aurora_mysql_port" { - count = "${local.mysql_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name_pattern, local.chamber_service, "keycloak_db_port")}" + count = local.mysql_cluster_enabled ? 1 : 0 + name = format(var.chamber_parameter_name_pattern, local.chamber_service, "keycloak_db_port") value = "3306" description = "Aurora MySQL DB Master hostname" type = "String" @@ -235,44 +235,44 @@ resource "aws_ssm_parameter" "aurora_mysql_port" { } resource "aws_ssm_parameter" "aurora_mysql_replicas_hostname" { - count = "${local.mysql_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name_pattern, local.chamber_service, "keycloak_aurora_mysql_replicas_hostname")}" - value = "${module.aurora_mysql.replicas_host}" + count = local.mysql_cluster_enabled ? 1 : 0 + name = format(var.chamber_parameter_name_pattern, local.chamber_service, "keycloak_aurora_mysql_replicas_hostname") + value = module.aurora_mysql.replicas_host description = "Aurora MySQL DB Replicas hostname" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "aurora_mysql_cluster_name" { - count = "${local.mysql_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name_pattern, local.chamber_service, "keycloak_aurora_mysql_cluster_name")}" - value = "${module.aurora_mysql.cluster_name}" + count = local.mysql_cluster_enabled ? 
1 : 0 + name = format(var.chamber_parameter_name_pattern, local.chamber_service, "keycloak_aurora_mysql_cluster_name") + value = module.aurora_mysql.cluster_name description = "Aurora MySQL DB Cluster Identifier" type = "String" overwrite = "true" } output "aurora_mysql_database_name" { - value = "${module.aurora_mysql.name}" + value = module.aurora_mysql.name description = "Aurora MySQL Database name" } output "aurora_mysql_master_username" { - value = "${module.aurora_mysql.user}" + value = module.aurora_mysql.user description = "Aurora MySQL Username for the master DB user" } output "aurora_mysql_master_hostname" { - value = "${module.aurora_mysql.master_host}" + value = module.aurora_mysql.master_host description = "Aurora MySQL DB Master hostname" } output "aurora_mysql_replicas_hostname" { - value = "${module.aurora_mysql.replicas_host}" + value = module.aurora_mysql.replicas_host description = "Aurora MySQL Replicas hostname" } output "aurora_mysql_cluster_name" { - value = "${module.aurora_mysql.cluster_name}" + value = module.aurora_mysql.cluster_name description = "Aurora MySQL Cluster Identifier" } diff --git a/deprecated/aws/keycloak-backing-services/main.tf b/deprecated/aws/keycloak-backing-services/main.tf index 61fb6e9be..dad8a4e4e 100644 --- a/deprecated/aws/keycloak-backing-services/main.tf +++ b/deprecated/aws/keycloak-backing-services/main.tf @@ -8,31 +8,31 @@ provider "aws" { version = "~> 2.17" assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } variable "aws_assume_role_arn" { - type = "string" + type = string } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `eg` or `cp`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } variable "region" { - type = "string" + type = string description = "AWS region" } variable "dns_zone_name" { - type = "string" + type = string description = "The DNS domain under which to put entries for the database. Usually the same as the cluster name, e.g. us-west-2.prod.cpco.io" } @@ -47,24 +47,24 @@ variable "chamber_parameter_name_pattern" { } variable "chamber_kms_key_id" { - type = "string" + type = string default = "alias/aws/ssm" description = "KMS key ID, ARN, or alias to use for encrypting SSM secrets" } data "aws_route53_zone" "default" { - name = "${var.dns_zone_name}" + name = var.dns_zone_name } locals { // availability_zones = ["${split(",", length(var.availability_zones) == 0 ? join(",", data.aws_availability_zones.available.names) : join(",", var.availability_zones))}"] - chamber_service = "${var.chamber_service == "" ? basename(pathexpand(path.module)) : var.chamber_service}" - dns_zone_id = "${data.aws_route53_zone.default.zone_id}" + chamber_service = var.chamber_service == "" ? basename(pathexpand(path.module)) : var.chamber_service + dns_zone_id = data.aws_route53_zone.default.zone_id } resource "aws_ssm_parameter" "keycloak_db_vendor" { - count = "${local.mysql_cluster_enabled ? 1 : 0}" - name = "${format(var.chamber_parameter_name_pattern, local.chamber_service, "keycloak_db_vendor")}" + count = local.mysql_cluster_enabled ? 1 : 0 + name = format(var.chamber_parameter_name_pattern, local.chamber_service, "keycloak_db_vendor") value = "mysql" description = "Database Vendor, e.g. 
mysql, postgres" type = "String" diff --git a/deprecated/aws/kops-aws-platform/acm.tf b/deprecated/aws/kops-aws-platform/acm.tf index 70595b197..c83aba69d 100644 --- a/deprecated/aws/kops-aws-platform/acm.tf +++ b/deprecated/aws/kops-aws-platform/acm.tf @@ -4,17 +4,17 @@ variable "kops_acm_enabled" { } variable "kops_acm_san_domains" { - type = "list" + type = list(string) default = [] description = "A list of domains (except *.{cluster_name}) that should be SANs in the issued certificate" } resource "aws_acm_certificate" "default" { - count = "${var.kops_acm_enabled ? 1 : 0}" + count = var.kops_acm_enabled ? 1 : 0 domain_name = "*.${var.region}.${var.zone_name}" validation_method = "DNS" subject_alternative_names = ["${var.kops_acm_san_domains}"] - tags = "${var.tags}" + tags = var.tags lifecycle { create_before_destroy = true @@ -22,11 +22,11 @@ resource "aws_acm_certificate" "default" { } output "kops_acm_arn" { - value = "${join("", aws_acm_certificate.default.*.arn)}" + value = join("", aws_acm_certificate.default.*.arn) description = "The ARN of the certificate" } output "kops_acm_domain_validation_options" { - value = "${flatten(aws_acm_certificate.default.*.domain_validation_options)}" + value = flatten(aws_acm_certificate.default.*.domain_validation_options) description = "CNAME records that need to be added to the DNS zone to complete certificate validation" } diff --git a/deprecated/aws/kops-aws-platform/alb-ingress.tf b/deprecated/aws/kops-aws-platform/alb-ingress.tf index 5676889bb..79f1c66da 100644 --- a/deprecated/aws/kops-aws-platform/alb-ingress.tf +++ b/deprecated/aws/kops-aws-platform/alb-ingress.tf @@ -5,13 +5,13 @@ variable "kops_alb_ingress_enabled" { module "kops_alb_ingress" { source = "git::https://github.com/cloudposse/terraform-aws-kops-aws-alb-ingress.git?ref=tags/0.2.0" - namespace = "${var.namespace}" - stage = "${var.stage}" + namespace = var.namespace + stage = var.stage name = "alb-ingress" cluster_name = "${var.region}.${var.zone_name}" - enabled = "${var.kops_alb_ingress_enabled}" + enabled = var.kops_alb_ingress_enabled - iam_role_max_session_duration = "${var.iam_role_max_session_duration}" + iam_role_max_session_duration = var.iam_role_max_session_duration tags = { Cluster = "${var.region}.${var.zone_name}" @@ -19,25 +19,25 @@ module "kops_alb_ingress" { } output "kops_alb_ingress_role_name" { - value = "${module.kops_alb_ingress.role_name}" + value = module.kops_alb_ingress.role_name } output "kops_alb_ingress_role_unique_id" { - value = "${module.kops_alb_ingress.role_unique_id}" + value = module.kops_alb_ingress.role_unique_id } output "kops_alb_ingress_role_arn" { - value = "${module.kops_alb_ingress.role_arn}" + value = module.kops_alb_ingress.role_arn } output "kops_alb_ingress_policy_name" { - value = "${module.kops_alb_ingress.policy_name}" + value = module.kops_alb_ingress.policy_name } output "kops_alb_ingress_policy_id" { - value = "${module.kops_alb_ingress.policy_id}" + value = module.kops_alb_ingress.policy_id } output "kops_alb_ingress_policy_arn" { - value = "${module.kops_alb_ingress.policy_arn}" + value = module.kops_alb_ingress.policy_arn } diff --git a/deprecated/aws/kops-aws-platform/autoscaler-role.tf b/deprecated/aws/kops-aws-platform/autoscaler-role.tf index ff60180ec..ea5cc6145 100644 --- a/deprecated/aws/kops-aws-platform/autoscaler-role.tf +++ b/deprecated/aws/kops-aws-platform/autoscaler-role.tf @@ -20,14 +20,14 @@ module "autoscaler_role" { source = 
"git::https://github.com/cloudposse/terraform-aws-iam-role.git?ref=tags/0.4.0" enabled = "true" - namespace = "${var.namespace}" - stage = "${var.stage}" + namespace = var.namespace + stage = var.stage name = "kubernetes" attributes = ["autoscaler", "role"] role_description = "Role for Cluster Auto-Scaler" policy_description = "Permit auto-scaling operations on auto-scaling groups" - max_session_duration = "${var.iam_role_max_session_duration}" + max_session_duration = var.iam_role_max_session_duration principals = { AWS = ["${module.kops_metadata_iam.masters_role_arn}"] @@ -37,24 +37,24 @@ module "autoscaler_role" { } resource "aws_ssm_parameter" "kops_autoscaler_iam_role_name" { - name = "${format(local.chamber_parameter_format, var.chamber_service, "kubernetes_autoscaler_iam_role_name")}" - value = "${module.autoscaler_role.name}" + name = format(local.chamber_parameter_format, var.chamber_service, "kubernetes_autoscaler_iam_role_name") + value = module.autoscaler_role.name description = "IAM role name for cluster autoscaler" type = "String" overwrite = "true" } output "autoscaler_role_name" { - value = "${module.autoscaler_role.name}" + value = module.autoscaler_role.name description = "The name of the IAM role created" } output "autoscaler_role_id" { - value = "${module.autoscaler_role.id}" + value = module.autoscaler_role.id description = "The stable and unique string identifying the role" } output "autoscaler_role_arn" { - value = "${module.autoscaler_role.arn}" + value = module.autoscaler_role.arn description = "The Amazon Resource Name (ARN) specifying the role" } diff --git a/deprecated/aws/kops-aws-platform/chart-repo.tf b/deprecated/aws/kops-aws-platform/chart-repo.tf index 9b5e5384c..9845b77de 100644 --- a/deprecated/aws/kops-aws-platform/chart-repo.tf +++ b/deprecated/aws/kops-aws-platform/chart-repo.tf @@ -1,12 +1,12 @@ module "kops_chart_repo" { source = "git::https://github.com/cloudposse/terraform-aws-kops-chart-repo.git?ref=tags/0.3.0" - namespace = "${var.namespace}" - stage = "${var.stage}" + namespace = var.namespace + stage = var.stage name = "chart-repo" cluster_name = "${var.region}.${var.zone_name}" - permitted_nodes = "${var.permitted_nodes}" + permitted_nodes = var.permitted_nodes - iam_role_max_session_duration = "${var.iam_role_max_session_duration}" + iam_role_max_session_duration = var.iam_role_max_session_duration tags = { Cluster = "${var.region}.${var.zone_name}" @@ -14,37 +14,37 @@ module "kops_chart_repo" { } output "kops_chart_repo_bucket_domain_name" { - value = "${module.kops_chart_repo.bucket_domain_name}" + value = module.kops_chart_repo.bucket_domain_name } output "kops_chart_repo_bucket_id" { - value = "${module.kops_chart_repo.bucket_id}" + value = module.kops_chart_repo.bucket_id } output "kops_chart_repo_bucket_arn" { - value = "${module.kops_chart_repo.bucket_arn}" + value = module.kops_chart_repo.bucket_arn } output "kops_chart_repo_role_name" { - value = "${module.kops_chart_repo.role_name}" + value = module.kops_chart_repo.role_name } output "kops_chart_repo_role_unique_id" { - value = "${module.kops_chart_repo.role_unique_id}" + value = module.kops_chart_repo.role_unique_id } output "kops_chart_repo_role_arn" { - value = "${module.kops_chart_repo.role_arn}" + value = module.kops_chart_repo.role_arn } output "kops_chart_repo_policy_name" { - value = "${module.kops_chart_repo.policy_name}" + value = module.kops_chart_repo.policy_name } output "kops_chart_repo_policy_id" { - value = "${module.kops_chart_repo.policy_id}" + value = 
module.kops_chart_repo.policy_id } output "kops_chart_repo_policy_arn" { - value = "${module.kops_chart_repo.policy_arn}" + value = module.kops_chart_repo.policy_arn } diff --git a/deprecated/aws/kops-aws-platform/efs-provisioner.tf b/deprecated/aws/kops-aws-platform/efs-provisioner.tf index 71e32c5b9..c3bf5f054 100644 --- a/deprecated/aws/kops-aws-platform/efs-provisioner.tf +++ b/deprecated/aws/kops-aws-platform/efs-provisioner.tf @@ -1,23 +1,23 @@ variable "efs_enabled" { - type = "string" + type = string description = "Set to true to allow the module to create EFS resources" default = "false" } variable "kops_dns_zone_id" { - type = "string" + type = string default = "" description = "DNS Zone ID for kops. EFS DNS entries will be added to this zone. If empyty, zone ID will be retrieved from SSM Parameter store" } variable "efs_encrypted" { - type = "string" + type = string description = "If true, the disk will be encrypted" default = "false" } variable "efs_performance_mode" { - type = "string" + type = string description = "The file system performance mode. Can be either `generalPurpose` or `maxIO`" default = "generalPurpose" } @@ -28,42 +28,42 @@ variable "efs_provisioned_throughput_in_mibps" { } variable "efs_throughput_mode" { - type = "string" + type = string description = "Throughput mode for the file system. Defaults to bursting. Valid values: bursting, provisioned. When using provisioned, also set provisioned_throughput_in_mibps" default = "bursting" } data "aws_ssm_parameter" "kops_availability_zones" { - name = "${format(local.chamber_parameter_format, var.chamber_service_kops, "kops_availability_zones")}" + name = format(local.chamber_parameter_format, var.chamber_service_kops, "kops_availability_zones") } data "aws_ssm_parameter" "kops_zone_id" { - count = "${var.efs_enabled == "true" && var.kops_dns_zone_id == "" ? 1 : 0}" - name = "${format(local.chamber_parameter_format, var.chamber_service_kops, "kops_dns_zone_id")}" + count = var.efs_enabled == "true" && var.kops_dns_zone_id == "" ? 
1 : 0 + name = format(local.chamber_parameter_format, var.chamber_service_kops, "kops_dns_zone_id") } locals { - kops_zone_id = "${coalesce(var.kops_dns_zone_id, join("", data.aws_ssm_parameter.kops_zone_id.*.value))}" + kops_zone_id = coalesce(var.kops_dns_zone_id, join("", data.aws_ssm_parameter.kops_zone_id.*.value)) } module "kops_efs_provisioner" { source = "git::https://github.com/cloudposse/terraform-aws-kops-efs.git?ref=tags/0.6.0" - enabled = "${var.efs_enabled}" - namespace = "${var.namespace}" - stage = "${var.stage}" + enabled = var.efs_enabled + namespace = var.namespace + stage = var.stage name = "efs-provisioner" - region = "${var.region}" + region = var.region availability_zones = ["${split(",", data.aws_ssm_parameter.kops_availability_zones.value)}"] - zone_id = "${local.kops_zone_id}" + zone_id = local.kops_zone_id cluster_name = "${var.region}.${var.zone_name}" - encrypted = "${var.efs_encrypted}" - performance_mode = "${var.efs_performance_mode}" + encrypted = var.efs_encrypted + performance_mode = var.efs_performance_mode - throughput_mode = "${var.efs_throughput_mode}" - provisioned_throughput_in_mibps = "${var.efs_provisioned_throughput_in_mibps}" + throughput_mode = var.efs_throughput_mode + provisioned_throughput_in_mibps = var.efs_provisioned_throughput_in_mibps - iam_role_max_session_duration = "${var.iam_role_max_session_duration}" + iam_role_max_session_duration = var.iam_role_max_session_duration tags = { Cluster = "${var.region}.${var.zone_name}" @@ -71,71 +71,71 @@ module "kops_efs_provisioner" { } resource "aws_ssm_parameter" "kops_efs_provisioner_role_name" { - count = "${var.efs_enabled == "true" ? 1 : 0}" - name = "${format(local.chamber_parameter_format, var.chamber_service, "kops_efs_provisioner_role_name")}" - value = "${module.kops_efs_provisioner.role_name}" + count = var.efs_enabled == "true" ? 1 : 0 + name = format(local.chamber_parameter_format, var.chamber_service, "kops_efs_provisioner_role_name") + value = module.kops_efs_provisioner.role_name description = "IAM role name for EFS provisioner" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "kops_efs_file_system_id" { - count = "${var.efs_enabled == "true" ? 1 : 0}" - name = "${format(local.chamber_parameter_format, var.chamber_service, "kops_efs_file_system_id")}" - value = "${module.kops_efs_provisioner.efs_id}" + count = var.efs_enabled == "true" ? 
1 : 0 + name = format(local.chamber_parameter_format, var.chamber_service, "kops_efs_file_system_id") + value = module.kops_efs_provisioner.efs_id description = "ID for shared EFS file system" type = "String" overwrite = "true" } output "kops_efs_provisioner_role_name" { - value = "${module.kops_efs_provisioner.role_name}" + value = module.kops_efs_provisioner.role_name } output "kops_efs_provisioner_role_unique_id" { - value = "${module.kops_efs_provisioner.role_unique_id}" + value = module.kops_efs_provisioner.role_unique_id } output "kops_efs_provisioner_role_arn" { - value = "${module.kops_efs_provisioner.role_arn}" + value = module.kops_efs_provisioner.role_arn } output "efs_arn" { - value = "${module.kops_efs_provisioner.efs_arn}" + value = module.kops_efs_provisioner.efs_arn description = "EFS ARN" } output "efs_id" { - value = "${module.kops_efs_provisioner.efs_id}" + value = module.kops_efs_provisioner.efs_id description = "EFS ID" } output "efs_host" { - value = "${module.kops_efs_provisioner.efs_host}" + value = module.kops_efs_provisioner.efs_host description = "EFS host" } output "efs_dns_name" { - value = "${module.kops_efs_provisioner.efs_dns_name}" + value = module.kops_efs_provisioner.efs_dns_name description = "EFS DNS name" } output "efs_mount_target_dns_names" { - value = "${module.kops_efs_provisioner.efs_mount_target_dns_names}" + value = module.kops_efs_provisioner.efs_mount_target_dns_names description = "EFS mount target DNS name" } output "efs_mount_target_ids" { - value = "${module.kops_efs_provisioner.efs_mount_target_ids}" + value = module.kops_efs_provisioner.efs_mount_target_ids description = "EFS mount target IDs" } output "efs_mount_target_ips" { - value = "${module.kops_efs_provisioner.efs_mount_target_ips}" + value = module.kops_efs_provisioner.efs_mount_target_ips description = "EFS mount target IPs" } output "efs_network_interface_ids" { - value = "${module.kops_efs_provisioner.efs_network_interface_ids}" + value = module.kops_efs_provisioner.efs_network_interface_ids description = "EFS network interface IDs" } diff --git a/deprecated/aws/kops-aws-platform/external-dns.tf b/deprecated/aws/kops-aws-platform/external-dns.tf index 2fdee8000..20c9b7f45 100644 --- a/deprecated/aws/kops-aws-platform/external-dns.tf +++ b/deprecated/aws/kops-aws-platform/external-dns.tf @@ -1,12 +1,12 @@ module "kops_external_dns" { source = "git::https://github.com/cloudposse/terraform-aws-kops-external-dns.git?ref=tags/0.3.0" - namespace = "${var.namespace}" - stage = "${var.stage}" + namespace = var.namespace + stage = var.stage name = "external-dns" cluster_name = "${var.region}.${var.zone_name}" - dns_zone_names = "${var.dns_zone_names}" + dns_zone_names = var.dns_zone_names - iam_role_max_session_duration = "${var.iam_role_max_session_duration}" + iam_role_max_session_duration = var.iam_role_max_session_duration tags = { Cluster = "${var.region}.${var.zone_name}" @@ -14,25 +14,25 @@ module "kops_external_dns" { } output "kops_external_dns_role_name" { - value = "${module.kops_external_dns.role_name}" + value = module.kops_external_dns.role_name } output "kops_external_dns_role_unique_id" { - value = "${module.kops_external_dns.role_unique_id}" + value = module.kops_external_dns.role_unique_id } output "kops_external_dns_role_arn" { - value = "${module.kops_external_dns.role_arn}" + value = module.kops_external_dns.role_arn } output "kops_external_dns_policy_name" { - value = "${module.kops_external_dns.policy_name}" + value = module.kops_external_dns.policy_name } output 
"kops_external_dns_policy_id" { - value = "${module.kops_external_dns.policy_id}" + value = module.kops_external_dns.policy_id } output "kops_external_dns_policy_arn" { - value = "${module.kops_external_dns.policy_arn}" + value = module.kops_external_dns.policy_arn } diff --git a/deprecated/aws/kops-aws-platform/flow-logs.tf b/deprecated/aws/kops-aws-platform/flow-logs.tf index 4cd2ba8cb..0ab06ff8b 100644 --- a/deprecated/aws/kops-aws-platform/flow-logs.tf +++ b/deprecated/aws/kops-aws-platform/flow-logs.tf @@ -1,63 +1,63 @@ variable "flow_logs_enabled" { - type = "string" + type = string default = "true" } module "flow_logs" { source = "git::https://github.com/cloudposse/terraform-aws-vpc-flow-logs-s3-bucket.git?ref=tags/0.1.1" name = "kops" - namespace = "${var.namespace}" - stage = "${var.stage}" - attributes = "${list("flow-logs")}" + namespace = var.namespace + stage = var.stage + attributes = list("flow-logs") - region = "${var.region}" + region = var.region - enabled = "${var.flow_logs_enabled}" + enabled = var.flow_logs_enabled - vpc_id = "${module.kops_metadata.vpc_id}" + vpc_id = module.kops_metadata.vpc_id } output "flow_logs_kms_key_arn" { - value = "${module.flow_logs.kms_key_arn}" + value = module.flow_logs.kms_key_arn description = "Flow logs KMS Key ARN" } output "flow_logs_kms_key_id" { - value = "${module.flow_logs.kms_key_id}" + value = module.flow_logs.kms_key_id description = "Flow logs KMS Key ID" } output "flow_logs_kms_alias_arn" { - value = "${module.flow_logs.kms_alias_arn}" + value = module.flow_logs.kms_alias_arn description = "Flow logs KMS Alias ARN" } output "flow_logs_kms_alias_name" { - value = "${module.flow_logs.kms_alias_name}" + value = module.flow_logs.kms_alias_name description = "Flow logs KMS Alias name" } output "flow_logs_bucket_domain_name" { - value = "${module.flow_logs.bucket_domain_name}" + value = module.flow_logs.bucket_domain_name description = "Flow logs FQDN of bucket" } output "flow_logs_bucket_id" { - value = "${module.flow_logs.bucket_id}" + value = module.flow_logs.bucket_id description = "Flow logs bucket Name (aka ID)" } output "flow_logs_bucket_arn" { - value = "${module.flow_logs.bucket_arn}" + value = module.flow_logs.bucket_arn description = "Flow logs bucket ARN" } output "flow_logs_bucket_prefix" { - value = "${module.flow_logs.bucket_prefix}" + value = module.flow_logs.bucket_prefix description = "Flow logs bucket prefix configured for lifecycle rules" } output "flow_logs_id" { - value = "${module.flow_logs.id}" + value = module.flow_logs.id description = "Flow logs ID" } diff --git a/deprecated/aws/kops-aws-platform/iam-authenticator.tf b/deprecated/aws/kops-aws-platform/iam-authenticator.tf index d0002ef63..9e232c57e 100644 --- a/deprecated/aws/kops-aws-platform/iam-authenticator.tf +++ b/deprecated/aws/kops-aws-platform/iam-authenticator.tf @@ -1,47 +1,47 @@ variable "kops_iam_enabled" { - type = "string" + type = string description = "Set to true to allow the module to create Kubernetes and IAM resources" default = "false" } variable "cluster_id" { - type = "string" + type = string description = "A unique-per-cluster identifier to prevent replay attacks. 
Good choices are a random token or a domain name that will be unique to your cluster" default = "random" } variable "kube_config_path" { - type = "string" + type = string default = "/dev/shm/kubecfg" description = "Path to the kube config file" } variable "admin_k8s_username" { - type = "string" + type = string description = "Kops admin username to be mapped to `admin_iam_role_arn`" default = "kubernetes-admin" } variable "admin_k8s_groups" { - type = "list" + type = list(string) description = "List of Kops groups to be mapped to `admin_iam_role_arn`" default = ["system:masters"] } variable "readonly_k8s_username" { - type = "string" + type = string description = "Kops readonly username to be mapped to `readonly_iam_role_arn`" default = "kubernetes-readonly" } variable "readonly_k8s_groups" { - type = "list" + type = list(string) description = "List of Kops groups to be mapped to `readonly_iam_role_arn`" default = ["view"] } resource "kubernetes_cluster_role_binding" "view" { - count = "${var.kops_iam_enabled == "true" ? 1 : 0}" + count = var.kops_iam_enabled == "true" ? 1 : 0 metadata { name = "view-binding" @@ -61,34 +61,34 @@ resource "kubernetes_cluster_role_binding" "view" { } variable "aws_root_account_id" { - type = "string" + type = string description = "AWS root account ID" } module "kops_admin_label" { source = "git::https://github.com/cloudposse/terraform-terraform-label.git?ref=tags/0.2.1" - namespace = "${var.namespace}" + namespace = var.namespace name = "kops" - stage = "${var.stage}" + stage = var.stage attributes = ["admin"] - delimiter = "${var.delimiter}" - tags = "${var.tags}" - enabled = "${var.kops_iam_enabled}" + delimiter = var.delimiter + tags = var.tags + enabled = var.kops_iam_enabled } module "kops_readonly_label" { source = "git::https://github.com/cloudposse/terraform-terraform-label.git?ref=tags/0.2.1" - namespace = "${var.namespace}" + namespace = var.namespace name = "kops" - stage = "${var.stage}" + stage = var.stage attributes = ["readonly"] - delimiter = "${var.delimiter}" - tags = "${var.tags}" - enabled = "${var.kops_iam_enabled}" + delimiter = var.delimiter + tags = var.tags + enabled = var.kops_iam_enabled } data "aws_iam_policy_document" "readonly" { - count = "${var.kops_iam_enabled == "true" ? 1 : 0}" + count = var.kops_iam_enabled == "true" ? 1 : 0 statement { actions = [ @@ -105,16 +105,16 @@ data "aws_iam_policy_document" "readonly" { } resource "aws_iam_role" "readonly" { - count = "${var.kops_iam_enabled == "true" ? 1 : 0}" - name = "${module.kops_readonly_label.id}" - assume_role_policy = "${data.aws_iam_policy_document.readonly.json}" + count = var.kops_iam_enabled == "true" ? 1 : 0 + name = module.kops_readonly_label.id + assume_role_policy = data.aws_iam_policy_document.readonly.json description = "The Kops readonly role for aws-iam-authenticator" - max_session_duration = "${var.iam_role_max_session_duration}" + max_session_duration = var.iam_role_max_session_duration } data "aws_iam_policy_document" "admin" { - count = "${var.kops_iam_enabled == "true" ? 1 : 0}" + count = var.kops_iam_enabled == "true" ? 1 : 0 statement { actions = [ @@ -131,23 +131,23 @@ data "aws_iam_policy_document" "admin" { } resource "aws_iam_role" "admin" { - count = "${var.kops_iam_enabled == "true" ? 1 : 0}" - name = "${module.kops_admin_label.id}" - assume_role_policy = "${data.aws_iam_policy_document.admin.json}" + count = var.kops_iam_enabled == "true" ? 
1 : 0 + name = module.kops_admin_label.id + assume_role_policy = data.aws_iam_policy_document.admin.json description = "The Kops admin role for aws-iam-authenticator" - max_session_duration = "${var.iam_role_max_session_duration}" + max_session_duration = var.iam_role_max_session_duration } module "iam_authenticator_config" { - enabled = "${var.kops_iam_enabled}" + enabled = var.kops_iam_enabled source = "git::https://github.com/cloudposse/terraform-aws-kops-iam-authenticator-config.git?ref=tags/0.2.2" - cluster_id = "${var.cluster_id}" - kube_config_path = "${var.kube_config_path}" - admin_iam_role_arn = "${element(concat(aws_iam_role.admin.*.arn, list("")), 0)}" - admin_k8s_username = "${var.admin_k8s_username}" - admin_k8s_groups = "${var.admin_k8s_groups}" - readonly_iam_role_arn = "${element(concat(aws_iam_role.readonly.*.arn, list("")), 0)}" - readonly_k8s_username = "${var.readonly_k8s_username}" - readonly_k8s_groups = "${var.readonly_k8s_groups}" + cluster_id = var.cluster_id + kube_config_path = var.kube_config_path + admin_iam_role_arn = element(concat(aws_iam_role.admin.*.arn, list("")), 0) + admin_k8s_username = var.admin_k8s_username + admin_k8s_groups = var.admin_k8s_groups + readonly_iam_role_arn = element(concat(aws_iam_role.readonly.*.arn, list("")), 0) + readonly_k8s_username = var.readonly_k8s_username + readonly_k8s_groups = var.readonly_k8s_groups } diff --git a/deprecated/aws/kops-aws-platform/main.tf b/deprecated/aws/kops-aws-platform/main.tf index 20308663b..119a12e47 100644 --- a/deprecated/aws/kops-aws-platform/main.tf +++ b/deprecated/aws/kops-aws-platform/main.tf @@ -6,13 +6,13 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } module "kops_metadata" { source = "git::https://github.com/cloudposse/terraform-aws-kops-data-network.git?ref=tags/0.1.1" - enabled = "${var.flow_logs_enabled}" + enabled = var.flow_logs_enabled cluster_name = "${var.region}.${var.zone_name}" } @@ -25,9 +25,9 @@ resource "aws_default_security_group" "default" { # If kops is using a shared VPC, then it is likely that the kops_metadata module will # return an empty vpc_id, in which case we will leave it to the VPC owner to manage # the default security group. - count = "${module.kops_metadata.vpc_id == "" ? 0 : 1}" + count = module.kops_metadata.vpc_id == "" ? 0 : 1 - vpc_id = "${module.kops_metadata.vpc_id}" + vpc_id = module.kops_metadata.vpc_id tags = { Name = "Default Security Group" diff --git a/deprecated/aws/kops-aws-platform/variables.tf b/deprecated/aws/kops-aws-platform/variables.tf index 842eeec88..e0ea2e48a 100644 --- a/deprecated/aws/kops-aws-platform/variables.tf +++ b/deprecated/aws/kops-aws-platform/variables.tf @@ -1,59 +1,59 @@ variable "aws_assume_role_arn" { - type = "string" + type = string description = "AWS IAM Role for Terraform to assume during operation" } variable "region" { - type = "string" + type = string description = "AWS region" } variable "zone_name" { - type = "string" + type = string description = "DNS zone name" } variable "dns_zone_names" { - type = "list" + type = list(string) description = "Names of zones for external-dns to manage (e.g. `us-east-1.cloudposse.com` or `cluster-1.cloudposse.com`)" } variable "permitted_nodes" { - type = "string" + type = string description = "Kops kubernetes nodes that are permitted to assume IAM roles (e.g. 
'nodes', 'masters', or 'both')" default = "both" } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } variable "delimiter" { - type = "string" + type = string default = "-" description = "Delimiter to be used between `namespace`, `stage`, `name` and `attributes`" } variable "tags" { - type = "map" + type = map(string) default = {} description = "Additional tags (e.g. `map('BusinessUnit','XYZ')`" } variable "chamber_service" { - type = "string" + type = string default = "kops" description = "Service under which to store SSM parameters" } variable "chamber_service_kops" { - type = "string" + type = string default = "kops" description = "Service where kops stores its configuration information" } diff --git a/deprecated/aws/kops-iam-users/corp.tf b/deprecated/aws/kops-iam-users/corp.tf index ca22476e5..1aaa56476 100644 --- a/deprecated/aws/kops-iam-users/corp.tf +++ b/deprecated/aws/kops-iam-users/corp.tf @@ -1,47 +1,47 @@ module "kops_admin_corp_label" { source = "git::https://github.com/cloudposse/terraform-terraform-label.git?ref=tags/0.2.1" - namespace = "${var.namespace}" + namespace = var.namespace name = "kops" stage = "corp" attributes = ["admin"] - delimiter = "${var.delimiter}" - tags = "${var.tags}" + delimiter = var.delimiter + tags = var.tags enabled = "true" } module "kops_readonly_corp_label" { source = "git::https://github.com/cloudposse/terraform-terraform-label.git?ref=tags/0.2.1" - namespace = "${var.namespace}" + namespace = var.namespace name = "kops" stage = "corp" attributes = ["readonly"] - delimiter = "${var.delimiter}" - tags = "${var.tags}" + delimiter = var.delimiter + tags = var.tags enabled = "true" } module "kops_admin_access_group_corp" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.kops_iam_accounts_enabled, "corp") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.kops_iam_accounts_enabled, "corp") == true ? "true" : "false" + namespace = var.namespace stage = "corp" name = "kops" attributes = ["admin"] - role_name = "${module.kops_admin_corp_label.id}" + role_name = module.kops_admin_corp_label.id user_names = [] - member_account_id = "${data.terraform_remote_state.accounts.corp_account_id}" + member_account_id = data.terraform_remote_state.accounts.corp_account_id require_mfa = "true" } module "kops_readonly_access_group_corp" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.kops_iam_accounts_enabled, "corp") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.kops_iam_accounts_enabled, "corp") == true ? 
"true" : "false" + namespace = var.namespace stage = "corp" name = "kops" attributes = ["readonly"] - role_name = "${module.kops_readonly_corp_label.id}" + role_name = module.kops_readonly_corp_label.id user_names = [] - member_account_id = "${data.terraform_remote_state.accounts.corp_account_id}" + member_account_id = data.terraform_remote_state.accounts.corp_account_id require_mfa = "true" } diff --git a/deprecated/aws/kops-iam-users/data.tf b/deprecated/aws/kops-iam-users/data.tf index 6d5aa462f..9fee6f4af 100644 --- a/deprecated/aws/kops-iam-users/data.tf +++ b/deprecated/aws/kops-iam-users/data.tf @@ -1,47 +1,47 @@ module "kops_admin_data_label" { source = "git::https://github.com/cloudposse/terraform-terraform-label.git?ref=tags/0.2.1" - namespace = "${var.namespace}" + namespace = var.namespace name = "kops" stage = "data" attributes = ["admin"] - delimiter = "${var.delimiter}" - tags = "${var.tags}" + delimiter = var.delimiter + tags = var.tags enabled = "true" } module "kops_readonly_data_label" { source = "git::https://github.com/cloudposse/terraform-terraform-label.git?ref=tags/0.2.1" - namespace = "${var.namespace}" + namespace = var.namespace name = "kops" stage = "data" attributes = ["readonly"] - delimiter = "${var.delimiter}" - tags = "${var.tags}" + delimiter = var.delimiter + tags = var.tags enabled = "true" } module "kops_admin_access_group_data" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.kops_iam_accounts_enabled, "data") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.kops_iam_accounts_enabled, "data") == true ? "true" : "false" + namespace = var.namespace stage = "data" name = "kops" attributes = ["admin"] - role_name = "${module.kops_admin_data_label.id}" + role_name = module.kops_admin_data_label.id user_names = [] - member_account_id = "${data.terraform_remote_state.accounts.data_account_id}" + member_account_id = data.terraform_remote_state.accounts.data_account_id require_mfa = "true" } module "kops_readonly_access_group_data" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.kops_iam_accounts_enabled, "data") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.kops_iam_accounts_enabled, "data") == true ? 
"true" : "false" + namespace = var.namespace stage = "data" name = "kops" attributes = ["readonly"] - role_name = "${module.kops_readonly_data_label.id}" + role_name = module.kops_readonly_data_label.id user_names = [] - member_account_id = "${data.terraform_remote_state.accounts.data_account_id}" + member_account_id = data.terraform_remote_state.accounts.data_account_id require_mfa = "true" } diff --git a/deprecated/aws/kops-iam-users/dev.tf b/deprecated/aws/kops-iam-users/dev.tf index dd4522cf9..8da80f9b4 100644 --- a/deprecated/aws/kops-iam-users/dev.tf +++ b/deprecated/aws/kops-iam-users/dev.tf @@ -1,47 +1,47 @@ module "kops_admin_dev_label" { source = "git::https://github.com/cloudposse/terraform-terraform-label.git?ref=tags/0.2.1" - namespace = "${var.namespace}" + namespace = var.namespace name = "kops" stage = "dev" attributes = ["admin"] - delimiter = "${var.delimiter}" - tags = "${var.tags}" + delimiter = var.delimiter + tags = var.tags enabled = "true" } module "kops_readonly_dev_label" { source = "git::https://github.com/cloudposse/terraform-terraform-label.git?ref=tags/0.2.1" - namespace = "${var.namespace}" + namespace = var.namespace name = "kops" stage = "dev" attributes = ["readonly"] - delimiter = "${var.delimiter}" - tags = "${var.tags}" + delimiter = var.delimiter + tags = var.tags enabled = "true" } module "kops_admin_access_group_dev" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.kops_iam_accounts_enabled, "dev") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.kops_iam_accounts_enabled, "dev") == true ? "true" : "false" + namespace = var.namespace stage = "dev" name = "kops" attributes = ["admin"] - role_name = "${module.kops_admin_dev_label.id}" + role_name = module.kops_admin_dev_label.id user_names = [] - member_account_id = "${data.terraform_remote_state.accounts.dev_account_id}" + member_account_id = data.terraform_remote_state.accounts.dev_account_id require_mfa = "true" } module "kops_readonly_access_group_dev" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.kops_iam_accounts_enabled, "dev") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.kops_iam_accounts_enabled, "dev") == true ? 
"true" : "false" + namespace = var.namespace stage = "dev" name = "kops" attributes = ["readonly"] - role_name = "${module.kops_readonly_dev_label.id}" + role_name = module.kops_readonly_dev_label.id user_names = [] - member_account_id = "${data.terraform_remote_state.accounts.dev_account_id}" + member_account_id = data.terraform_remote_state.accounts.dev_account_id require_mfa = "true" } diff --git a/deprecated/aws/kops-iam-users/main.tf b/deprecated/aws/kops-iam-users/main.tf index 90231fa3b..2d367c808 100644 --- a/deprecated/aws/kops-iam-users/main.tf +++ b/deprecated/aws/kops-iam-users/main.tf @@ -6,7 +6,7 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } diff --git a/deprecated/aws/kops-iam-users/prod.tf b/deprecated/aws/kops-iam-users/prod.tf index 60f7bcb74..4d76b140a 100644 --- a/deprecated/aws/kops-iam-users/prod.tf +++ b/deprecated/aws/kops-iam-users/prod.tf @@ -1,47 +1,47 @@ module "kops_admin_prod_label" { source = "git::https://github.com/cloudposse/terraform-terraform-label.git?ref=tags/0.2.1" - namespace = "${var.namespace}" + namespace = var.namespace name = "kops" stage = "prod" attributes = ["admin"] - delimiter = "${var.delimiter}" - tags = "${var.tags}" + delimiter = var.delimiter + tags = var.tags enabled = "true" } module "kops_readonly_prod_label" { source = "git::https://github.com/cloudposse/terraform-terraform-label.git?ref=tags/0.2.1" - namespace = "${var.namespace}" + namespace = var.namespace name = "kops" stage = "prod" attributes = ["readonly"] - delimiter = "${var.delimiter}" - tags = "${var.tags}" + delimiter = var.delimiter + tags = var.tags enabled = "true" } module "kops_admin_access_group_prod" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.kops_iam_accounts_enabled, "prod") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.kops_iam_accounts_enabled, "prod") == true ? "true" : "false" + namespace = var.namespace stage = "prod" name = "kops" attributes = ["admin"] - role_name = "${module.kops_admin_prod_label.id}" + role_name = module.kops_admin_prod_label.id user_names = [] - member_account_id = "${data.terraform_remote_state.accounts.prod_account_id}" + member_account_id = data.terraform_remote_state.accounts.prod_account_id require_mfa = "true" } module "kops_readonly_access_group_prod" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.kops_iam_accounts_enabled, "prod") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.kops_iam_accounts_enabled, "prod") == true ? 
"true" : "false" + namespace = var.namespace stage = "prod" name = "kops" attributes = ["readonly"] - role_name = "${module.kops_readonly_prod_label.id}" + role_name = module.kops_readonly_prod_label.id user_names = [] - member_account_id = "${data.terraform_remote_state.accounts.prod_account_id}" + member_account_id = data.terraform_remote_state.accounts.prod_account_id require_mfa = "true" } diff --git a/deprecated/aws/kops-iam-users/staging.tf b/deprecated/aws/kops-iam-users/staging.tf index fba410e86..2677fa0de 100644 --- a/deprecated/aws/kops-iam-users/staging.tf +++ b/deprecated/aws/kops-iam-users/staging.tf @@ -1,47 +1,47 @@ module "kops_admin_staging_label" { source = "git::https://github.com/cloudposse/terraform-terraform-label.git?ref=tags/0.2.1" - namespace = "${var.namespace}" + namespace = var.namespace name = "kops" stage = "staging" attributes = ["admin"] - delimiter = "${var.delimiter}" - tags = "${var.tags}" + delimiter = var.delimiter + tags = var.tags enabled = "true" } module "kops_readonly_staging_label" { source = "git::https://github.com/cloudposse/terraform-terraform-label.git?ref=tags/0.2.1" - namespace = "${var.namespace}" + namespace = var.namespace name = "kops" stage = "staging" attributes = ["readonly"] - delimiter = "${var.delimiter}" - tags = "${var.tags}" + delimiter = var.delimiter + tags = var.tags enabled = "true" } module "kops_admin_access_group_staging" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.kops_iam_accounts_enabled, "staging") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.kops_iam_accounts_enabled, "staging") == true ? "true" : "false" + namespace = var.namespace stage = "staging" name = "kops" attributes = ["admin"] - role_name = "${module.kops_admin_staging_label.id}" + role_name = module.kops_admin_staging_label.id user_names = [] - member_account_id = "${data.terraform_remote_state.accounts.staging_account_id}" + member_account_id = data.terraform_remote_state.accounts.staging_account_id require_mfa = "true" } module "kops_readonly_access_group_staging" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.kops_iam_accounts_enabled, "staging") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.kops_iam_accounts_enabled, "staging") == true ? 
"true" : "false" + namespace = var.namespace stage = "staging" name = "kops" attributes = ["readonly"] - role_name = "${module.kops_readonly_staging_label.id}" + role_name = module.kops_readonly_staging_label.id user_names = [] - member_account_id = "${data.terraform_remote_state.accounts.staging_account_id}" + member_account_id = data.terraform_remote_state.accounts.staging_account_id require_mfa = "true" } diff --git a/deprecated/aws/kops-iam-users/testing.tf b/deprecated/aws/kops-iam-users/testing.tf index 515b16a12..393dc202a 100644 --- a/deprecated/aws/kops-iam-users/testing.tf +++ b/deprecated/aws/kops-iam-users/testing.tf @@ -1,47 +1,47 @@ module "kops_admin_testing_label" { source = "git::https://github.com/cloudposse/terraform-terraform-label.git?ref=tags/0.2.1" - namespace = "${var.namespace}" + namespace = var.namespace name = "kops" stage = "testing" attributes = ["admin"] - delimiter = "${var.delimiter}" - tags = "${var.tags}" + delimiter = var.delimiter + tags = var.tags enabled = "true" } module "kops_readonly_testing_label" { source = "git::https://github.com/cloudposse/terraform-terraform-label.git?ref=tags/0.2.1" - namespace = "${var.namespace}" + namespace = var.namespace name = "kops" stage = "testing" attributes = ["readonly"] - delimiter = "${var.delimiter}" - tags = "${var.tags}" + delimiter = var.delimiter + tags = var.tags enabled = "true" } module "kops_admin_access_group_testing" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.kops_iam_accounts_enabled, "testing") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.kops_iam_accounts_enabled, "testing") == true ? "true" : "false" + namespace = var.namespace stage = "testing" name = "kops" attributes = ["admin"] - role_name = "${module.kops_admin_testing_label.id}" + role_name = module.kops_admin_testing_label.id user_names = [] - member_account_id = "${data.terraform_remote_state.accounts.testing_account_id}" + member_account_id = data.terraform_remote_state.accounts.testing_account_id require_mfa = "true" } module "kops_readonly_access_group_testing" { source = "git::https://github.com/cloudposse/terraform-aws-organization-access-group.git?ref=tags/0.4.0" - enabled = "${contains(var.kops_iam_accounts_enabled, "testing") == true ? "true" : "false"}" - namespace = "${var.namespace}" + enabled = contains(var.kops_iam_accounts_enabled, "testing") == true ? "true" : "false" + namespace = var.namespace stage = "testing" name = "kops" attributes = ["readonly"] - role_name = "${module.kops_readonly_testing_label.id}" + role_name = module.kops_readonly_testing_label.id user_names = [] - member_account_id = "${data.terraform_remote_state.accounts.testing_account_id}" + member_account_id = data.terraform_remote_state.accounts.testing_account_id require_mfa = "true" } diff --git a/deprecated/aws/kops-iam-users/variables.tf b/deprecated/aws/kops-iam-users/variables.tf index 4100e1ec6..6f3f3bb47 100644 --- a/deprecated/aws/kops-iam-users/variables.tf +++ b/deprecated/aws/kops-iam-users/variables.tf @@ -1,31 +1,31 @@ variable "aws_assume_role_arn" { - type = "string" + type = string } variable "kops_iam_accounts_enabled" { - type = "list" + type = list(string) description = "Accounts to create an IAM role and group for Kops users" default = ["dev", "staging", "prod", "testing"] } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. 
`cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage, e.g. 'prod', 'staging', 'dev', or 'test'" } variable "delimiter" { - type = "string" + type = string default = "-" description = "Delimiter to be used between `namespace`, `stage`, `name` and `attributes`" } variable "tags" { - type = "map" + type = map(string) default = {} description = "Additional tags (e.g. `map('BusinessUnit','XYZ')`" } diff --git a/deprecated/aws/kops-legacy-account-vpc-peering/main.tf b/deprecated/aws/kops-legacy-account-vpc-peering/main.tf index 902497d5d..35b3cd021 100644 --- a/deprecated/aws/kops-legacy-account-vpc-peering/main.tf +++ b/deprecated/aws/kops-legacy-account-vpc-peering/main.tf @@ -6,36 +6,36 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } # Lookup VPC of the kops cluster module "kops_metadata" { source = "git::https://github.com/cloudposse/terraform-aws-kops-metadata.git?ref=tags/0.2.1" - enabled = "${var.enabled}" + enabled = var.enabled dns_zone = "${var.region}.${var.zone_name}" - vpc_tag = "${var.vpc_tag}" + vpc_tag = var.vpc_tag vpc_tag_values = ["${var.vpc_tag_values}"] } module "kops_legacy_account_vpc_peering" { source = "git::https://github.com/cloudposse/terraform-aws-vpc-peering-multi-account.git?ref=tags/0.5.0" - enabled = "${var.enabled}" + enabled = var.enabled - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" + namespace = var.namespace + stage = var.stage + name = var.name auto_accept = true # Requester - requester_vpc_id = "${module.kops_metadata.vpc_id}" - requester_region = "${var.region}" - requester_aws_assume_role_arn = "${var.aws_assume_role_arn}" + requester_vpc_id = module.kops_metadata.vpc_id + requester_region = var.region + requester_aws_assume_role_arn = var.aws_assume_role_arn # Accepter - accepter_vpc_id = "${var.legacy_account_vpc_id}" - accepter_region = "${var.legacy_account_region}" - accepter_aws_assume_role_arn = "${var.legacy_account_assume_role_arn}" + accepter_vpc_id = var.legacy_account_vpc_id + accepter_region = var.legacy_account_region + accepter_aws_assume_role_arn = var.legacy_account_assume_role_arn } diff --git a/deprecated/aws/kops-legacy-account-vpc-peering/outputs.tf b/deprecated/aws/kops-legacy-account-vpc-peering/outputs.tf index 6e5a0ecae..f75cbe3da 100644 --- a/deprecated/aws/kops-legacy-account-vpc-peering/outputs.tf +++ b/deprecated/aws/kops-legacy-account-vpc-peering/outputs.tf @@ -1,19 +1,19 @@ output "accepter_accept_status" { description = "Accepter VPC peering connection request status" - value = "${module.kops_legacy_account_vpc_peering.accepter_accept_status}" + value = module.kops_legacy_account_vpc_peering.accepter_accept_status } output "accepter_connection_id" { description = "Accepter VPC peering connection ID" - value = "${module.kops_legacy_account_vpc_peering.accepter_connection_id}" + value = module.kops_legacy_account_vpc_peering.accepter_connection_id } output "requester_accept_status" { description = "Requester VPC peering connection request status" - value = "${module.kops_legacy_account_vpc_peering.requester_accept_status}" + value = module.kops_legacy_account_vpc_peering.requester_accept_status } output "requester_connection_id" { description = "Requester VPC peering connection ID" - value = "${module.kops_legacy_account_vpc_peering.requester_connection_id}" + value = module.kops_legacy_account_vpc_peering.requester_connection_id } diff --git 
a/deprecated/aws/kops-legacy-account-vpc-peering/variables.tf b/deprecated/aws/kops-legacy-account-vpc-peering/variables.tf index 7c742b0f9..44fd2fade 100644 --- a/deprecated/aws/kops-legacy-account-vpc-peering/variables.tf +++ b/deprecated/aws/kops-legacy-account-vpc-peering/variables.tf @@ -1,60 +1,60 @@ variable "aws_assume_role_arn" {} variable "enabled" { - type = "string" + type = string description = "Whether to create the resources. Set to `false` to prevent the module from creating any resources" default = "true" } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `eg` or `cp`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } variable "name" { - type = "string" + type = string description = "Application or solution name (e.g. `app`)" default = "vpc-peering" } variable "region" { - type = "string" + type = string description = "AWS region" } variable "zone_name" { - type = "string" + type = string description = "DNS zone name" } variable "vpc_tag" { - type = "string" + type = string default = "Name" description = "Tag used to lookup the Kops VPC" } variable "vpc_tag_values" { - type = "list" + type = list(string) default = [] description = "Tag values list to lookup the Kops VPC" } variable "legacy_account_assume_role_arn" { - type = "string" + type = string description = "Legacy account assume role ARN" } variable "legacy_account_region" { - type = "string" + type = string description = "Legacy account AWS region" } variable "legacy_account_vpc_id" { - type = "string" + type = string description = "Legacy account VPC ID" } diff --git a/deprecated/aws/kops/README.md b/deprecated/aws/kops/README.md index b283a050c..a5cd5ae3d 100644 --- a/deprecated/aws/kops/README.md +++ b/deprecated/aws/kops/README.md @@ -1,6 +1,6 @@ # Kubernetes Ops (kops) -This project provisions dependencies for `kops` clusters including the DNS zone, S3 bucket for state storage, SSH keypair. +This project provisions dependencies for `kops` clusters including the DNS zone, S3 bucket for state storage, SSH keypair. It also writes the computed settings to SSM for usage by other modules or tools. @@ -32,7 +32,7 @@ This is roughly the process to get up and running. These instructions assume you 9. Run `make kops/apply` to build the cluster 10. Run `make kops/validate` to view cluster status. Note, it will take ~10 minutes to come online (depending on cluster size) -Once the cluster is online, you can interact with it using `kubectl`. +Once the cluster is online, you can interact with it using `kubectl`. To start, first run this to export `kubecfg` from the `kops` state store (required to access the cluster): ``` @@ -40,4 +40,3 @@ make kops/export ``` Then all the standard `kubectl` commands will work (e.g. `kubectl get nodes`). - diff --git a/deprecated/aws/kops/main.tf b/deprecated/aws/kops/main.tf index 37500de97..62c021823 100644 --- a/deprecated/aws/kops/main.tf +++ b/deprecated/aws/kops/main.tf @@ -6,49 +6,49 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } data "aws_availability_zones" "default" {} locals { - chamber_service = "${var.chamber_service == "" ? 
basename(pathexpand(path.module)) : var.chamber_service}" - computed_availability_zones = "${data.aws_availability_zones.default.names}" - distinct_availability_zones = "${distinct(compact(concat(var.availability_zones, local.computed_availability_zones)))}" + chamber_service = var.chamber_service == "" ? basename(pathexpand(path.module)) : var.chamber_service + computed_availability_zones = data.aws_availability_zones.default.names + distinct_availability_zones = distinct(compact(concat(var.availability_zones, local.computed_availability_zones))) # If we are creating the VPC, concatenate the predefined AZs with the computed AZs and select the first N distinct AZs. # If we are using a shared VPC, use the availability zones dictated by the VPC - availability_zones = "${split(",", var.create_vpc == "true" ? join(",", slice(local.distinct_availability_zones, 0, var.availability_zone_count)) : join("", data.aws_ssm_parameter.availability_zones.*.value))}" + availability_zones = split(",", var.create_vpc == "true" ? join(",", slice(local.distinct_availability_zones, 0, var.availability_zone_count)) : join("", data.aws_ssm_parameter.availability_zones.*.value)) - availability_zone_count = "${length(local.availability_zones)}" + availability_zone_count = length(local.availability_zones) } module "kops_state_backend" { source = "git::https://github.com/cloudposse/terraform-aws-kops-state-backend.git?ref=tags/0.3.0" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" + namespace = var.namespace + stage = var.stage + name = var.name attributes = ["${var.kops_attribute}"] - cluster_name = "${coalesce(var.cluster_name_prefix, var.resource_region, var.region)}" - parent_zone_name = "${var.zone_name}" - zone_name = "${var.complete_zone_name}" - domain_enabled = "${var.domain_enabled}" - force_destroy = "${var.force_destroy}" - region = "${coalesce(var.state_store_region, var.region)}" - create_bucket = "${var.create_state_store_bucket}" + cluster_name = coalesce(var.cluster_name_prefix, var.resource_region, var.region) + parent_zone_name = var.zone_name + zone_name = var.complete_zone_name + domain_enabled = var.domain_enabled + force_destroy = var.force_destroy + region = coalesce(var.state_store_region, var.region) + create_bucket = var.create_state_store_bucket } module "ssh_key_pair" { source = "git::https://github.com/cloudposse/terraform-aws-ssm-tls-ssh-key-pair.git?ref=tags/0.2.0" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" + namespace = var.namespace + stage = var.stage + name = var.name attributes = ["${coalesce(var.resource_region, var.region)}"] - ssm_path_prefix = "${local.chamber_service}" - rsa_bits = "${var.ssh_key_rsa_bits}" - ssh_key_algorithm = "${var.ssh_key_algorithm}" - ecdsa_curve = "${var.ssh_key_ecdsa_curve}" + ssm_path_prefix = local.chamber_service + rsa_bits = var.ssh_key_rsa_bits + ssh_key_algorithm = var.ssh_key_algorithm + ecdsa_curve = var.ssh_key_ecdsa_curve ssh_public_key_name = "kops_ssh_public_key" ssh_private_key_name = "kops_ssh_private_key" } @@ -56,70 +56,70 @@ module "ssh_key_pair" { # Allocate one large subnet for each AZ, plus one additional one for the utility subnets. module "private_subnets" { source = "subnets" - iprange = "${local.vpc_network_cidr}" - newbits = "${var.private_subnets_newbits > 0 ? 
var.private_subnets_newbits : local.availability_zone_count}" - netnum = "${var.private_subnets_netnum}" - subnet_count = "${local.availability_zone_count + 1}" + iprange = local.vpc_network_cidr + newbits = var.private_subnets_newbits > 0 ? var.private_subnets_newbits : local.availability_zone_count + netnum = var.private_subnets_netnum + subnet_count = local.availability_zone_count + 1 } # Divide up the first private subnet and use it for the utility subnet module "utility_subnets" { source = "subnets" - iprange = "${module.private_subnets.cidrs[0]}" - newbits = "${var.utility_subnets_newbits > 0 ? var.utility_subnets_newbits : local.availability_zone_count}" - netnum = "${var.utility_subnets_netnum}" - subnet_count = "${local.availability_zone_count}" + iprange = module.private_subnets.cidrs[0] + newbits = var.utility_subnets_newbits > 0 ? var.utility_subnets_newbits : local.availability_zone_count + netnum = var.utility_subnets_netnum + subnet_count = local.availability_zone_count } ####### # If create_vpc is not true, then we import all the VPC configuration from the VPC chamber service # data "aws_ssm_parameter" "vpc_id" { - count = "${var.create_vpc == "true" ? 0 : 1}" - name = "${format(var.vpc_chamber_parameter_name, var.vpc_chamber_service, var.vpc_paramter_prefix, "vpc_id")}" + count = var.create_vpc == "true" ? 0 : 1 + name = format(var.vpc_chamber_parameter_name, var.vpc_chamber_service, var.vpc_paramter_prefix, "vpc_id") } data "aws_ssm_parameter" "vpc_cidr_block" { - count = "${var.create_vpc == "true" ? 0 : 1}" - name = "${format(var.vpc_chamber_parameter_name, var.vpc_chamber_service, var.vpc_paramter_prefix, "cidr_block")}" + count = var.create_vpc == "true" ? 0 : 1 + name = format(var.vpc_chamber_parameter_name, var.vpc_chamber_service, var.vpc_paramter_prefix, "cidr_block") } ### # The following are lists, and must all be the same size and in the same order # data "aws_ssm_parameter" "availability_zones" { - count = "${var.create_vpc == "true" ? 0 : 1}" - name = "${format(var.vpc_chamber_parameter_name, var.vpc_chamber_service, var.vpc_paramter_prefix, "availability_zones")}" + count = var.create_vpc == "true" ? 0 : 1 + name = format(var.vpc_chamber_parameter_name, var.vpc_chamber_service, var.vpc_paramter_prefix, "availability_zones") } # List of NAT gateways from private subnet to public, one per subnet, which is one per availability zone data "aws_ssm_parameter" "nat_gateways" { - count = "${var.create_vpc == "false" && var.use_shared_nat_gateways == "true" ? 1 : 0}" - name = "${format(var.vpc_chamber_parameter_name, var.vpc_chamber_service, var.vpc_paramter_prefix, "nat_gateways")}" + count = var.create_vpc == "false" && var.use_shared_nat_gateways == "true" ? 1 : 0 + name = format(var.vpc_chamber_parameter_name, var.vpc_chamber_service, var.vpc_paramter_prefix, "nat_gateways") } # List of private subnet CIDR blocks, one per availability zone data "aws_ssm_parameter" "private_subnet_cidrs" { - count = "${var.create_vpc == "true" ? 0 : 1}" - name = "${format(var.vpc_chamber_parameter_name, var.vpc_chamber_service, var.vpc_paramter_prefix, "private_subnet_cidrs")}" + count = var.create_vpc == "true" ? 0 : 1 + name = format(var.vpc_chamber_parameter_name, var.vpc_chamber_service, var.vpc_paramter_prefix, "private_subnet_cidrs") } # List of private subnet AWS IDs, one per availability zone data "aws_ssm_parameter" "private_subnet_ids" { - count = "${var.create_vpc == "true" ? 
0 : 1}" - name = "${format(var.vpc_chamber_parameter_name, var.vpc_chamber_service, var.vpc_paramter_prefix, "private_subnet_ids")}" + count = var.create_vpc == "true" ? 0 : 1 + name = format(var.vpc_chamber_parameter_name, var.vpc_chamber_service, var.vpc_paramter_prefix, "private_subnet_ids") } # List of public subnet CIDR blocks, one per availability zone data "aws_ssm_parameter" "public_subnet_cidrs" { - count = "${var.create_vpc == "true" ? 0 : 1}" - name = "${format(var.vpc_chamber_parameter_name, var.vpc_chamber_service, var.vpc_paramter_prefix, "public_subnet_cidrs")}" + count = var.create_vpc == "true" ? 0 : 1 + name = format(var.vpc_chamber_parameter_name, var.vpc_chamber_service, var.vpc_paramter_prefix, "public_subnet_cidrs") } # List of public subnet AWS IDs, one per availability zone data "aws_ssm_parameter" "public_subnet_ids" { - count = "${var.create_vpc == "true" ? 0 : 1}" - name = "${format(var.vpc_chamber_parameter_name, var.vpc_chamber_service, var.vpc_paramter_prefix, "public_subnet_ids")}" + count = var.create_vpc == "true" ? 0 : 1 + name = format(var.vpc_chamber_parameter_name, var.vpc_chamber_service, var.vpc_paramter_prefix, "public_subnet_ids") } # @@ -127,24 +127,24 @@ data "aws_ssm_parameter" "public_subnet_ids" { ###### locals { - vpc_network_cidr = "${var.create_vpc == "true" ? var.network_cidr : join("", data.aws_ssm_parameter.vpc_cidr_block.*.value)}" - private_subnet_cidrs = "${var.create_vpc == "true" ? join(",", slice(module.private_subnets.cidrs, 1, local.availability_zone_count + 1)) : join("", data.aws_ssm_parameter.private_subnet_cidrs.*.value)}" - utility_subnet_cidrs = "${var.create_vpc == "true" ? join(",", module.utility_subnets.cidrs) : join("", data.aws_ssm_parameter.public_subnet_cidrs.*.value)}" + vpc_network_cidr = var.create_vpc == "true" ? var.network_cidr : join("", data.aws_ssm_parameter.vpc_cidr_block.*.value) + private_subnet_cidrs = var.create_vpc == "true" ? join(",", slice(module.private_subnets.cidrs, 1, local.availability_zone_count + 1)) : join("", data.aws_ssm_parameter.private_subnet_cidrs.*.value) + utility_subnet_cidrs = var.create_vpc == "true" ? 
join(",", module.utility_subnets.cidrs) : join("", data.aws_ssm_parameter.public_subnet_cidrs.*.value) } # These parameters correspond to the kops manifest template: # Read more: resource "aws_ssm_parameter" "kops_cluster_name" { - name = "${format(var.chamber_parameter_name, local.chamber_service, "kops_cluster_name")}" - value = "${module.kops_state_backend.zone_name}" + name = format(var.chamber_parameter_name, local.chamber_service, "kops_cluster_name") + value = module.kops_state_backend.zone_name description = "Kops cluster name" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "kops_state_store" { - name = "${format(var.chamber_parameter_name, local.chamber_service, "kops_state_store")}" + name = format(var.chamber_parameter_name, local.chamber_service, "kops_state_store") value = "s3://${module.kops_state_backend.bucket_name}" description = "Kops state store S3 bucket name" type = "String" @@ -152,32 +152,32 @@ resource "aws_ssm_parameter" "kops_state_store" { } resource "aws_ssm_parameter" "kops_state_store_region" { - name = "${format(var.chamber_parameter_name, local.chamber_service, "kops_state_store_region")}" - value = "${module.kops_state_backend.bucket_region}" + name = format(var.chamber_parameter_name, local.chamber_service, "kops_state_store_region") + value = module.kops_state_backend.bucket_region description = "Kops state store (S3 bucket) region" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "kops_dns_zone" { - name = "${format(var.chamber_parameter_name, local.chamber_service, "kops_dns_zone")}" - value = "${module.kops_state_backend.zone_name}" + name = format(var.chamber_parameter_name, local.chamber_service, "kops_dns_zone") + value = module.kops_state_backend.zone_name description = "Kops DNS zone name" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "kops_dns_zone_id" { - name = "${format(var.chamber_parameter_name, local.chamber_service, "kops_dns_zone_id")}" - value = "${module.kops_state_backend.zone_id}" + name = format(var.chamber_parameter_name, local.chamber_service, "kops_dns_zone_id") + value = module.kops_state_backend.zone_id description = "Kops DNS zone ID" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "kops_network_cidr" { - name = "${format(var.chamber_parameter_name, local.chamber_service, "kops_network_cidr")}" - value = "${local.vpc_network_cidr}" + name = format(var.chamber_parameter_name, local.chamber_service, "kops_network_cidr") + value = local.vpc_network_cidr description = "CIDR block of the kops virtual network" type = "String" overwrite = "true" @@ -185,9 +185,9 @@ resource "aws_ssm_parameter" "kops_network_cidr" { # If we are using a shared VPC, we save its AWS ID here. If kops is creating the VPC, we do not export the ID resource "aws_ssm_parameter" "kops_shared_vpc_id" { - count = "${var.create_vpc == "true" ? 0 : 1}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "kops_shared_vpc_id")}" - value = "${join("", data.aws_ssm_parameter.vpc_id.*.value)}" + count = var.create_vpc == "true" ? 0 : 1 + name = format(var.chamber_parameter_name, local.chamber_service, "kops_shared_vpc_id") + value = join("", data.aws_ssm_parameter.vpc_id.*.value) description = "Kops (shared) VPC AWS ID" type = "String" overwrite = "true" @@ -195,9 +195,9 @@ resource "aws_ssm_parameter" "kops_shared_vpc_id" { # If we are using a shared VPC, we save the list of NAT gateway IDs here. 
If kops is creating the VPC, we do not export the IDs resource "aws_ssm_parameter" "kops_shared_nat_gateways" { - count = "${var.create_vpc == "true" ? 0 : 1}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "kops_shared_nat_gateways")}" - value = "${var.use_shared_nat_gateways == "true" ? join("", data.aws_ssm_parameter.nat_gateways.*.value) : replace(local.private_subnet_cidrs, "/[^,]+/", "External")}" + count = var.create_vpc == "true" ? 0 : 1 + name = format(var.chamber_parameter_name, local.chamber_service, "kops_shared_nat_gateways") + value = var.use_shared_nat_gateways == "true" ? join("", data.aws_ssm_parameter.nat_gateways.*.value) : replace(local.private_subnet_cidrs, "/[^,]+/", "External") description = "Kops (shared) private subnet NAT gateway AWS IDs" type = "String" overwrite = "true" @@ -205,9 +205,9 @@ resource "aws_ssm_parameter" "kops_shared_nat_gateways" { # If we are using a shared VPC, we save the list of private subnet IDs here. If kops is creating the VPC, we do not export the IDs resource "aws_ssm_parameter" "kops_shared_private_subnet_ids" { - count = "${var.create_vpc == "true" ? 0 : 1}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "kops_shared_private_subnet_ids")}" - value = "${join("", data.aws_ssm_parameter.private_subnet_ids.*.value)}" + count = var.create_vpc == "true" ? 0 : 1 + name = format(var.chamber_parameter_name, local.chamber_service, "kops_shared_private_subnet_ids") + value = join("", data.aws_ssm_parameter.private_subnet_ids.*.value) description = "Kops private subnet AWS IDs" type = "String" overwrite = "true" @@ -215,41 +215,41 @@ resource "aws_ssm_parameter" "kops_shared_private_subnet_ids" { # If we are using a shared VPC, we save the list of utility (public) subnet IDs here. If kops is creating the VPC, we do not export the IDs resource "aws_ssm_parameter" "kops_shared_utility_subnet_ids" { - count = "${var.create_vpc == "true" ? 0 : 1}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "kops_shared_utility_subnet_ids")}" - value = "${join("", data.aws_ssm_parameter.public_subnet_ids.*.value)}" + count = var.create_vpc == "true" ? 
0 : 1 + name = format(var.chamber_parameter_name, local.chamber_service, "kops_shared_utility_subnet_ids") + value = join("", data.aws_ssm_parameter.public_subnet_ids.*.value) description = "Kops utility subnet AWS IDs" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "kops_private_subnets" { - name = "${format(var.chamber_parameter_name, local.chamber_service, "kops_private_subnets")}" - value = "${local.private_subnet_cidrs}" + name = format(var.chamber_parameter_name, local.chamber_service, "kops_private_subnets") + value = local.private_subnet_cidrs description = "Kops private subnet CIDRs" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "kops_utility_subnets" { - name = "${format(var.chamber_parameter_name, local.chamber_service, "kops_utility_subnets")}" - value = "${local.utility_subnet_cidrs}" + name = format(var.chamber_parameter_name, local.chamber_service, "kops_utility_subnets") + value = local.utility_subnet_cidrs description = "Kops utility subnet CIDRs" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "kops_non_masquerade_cidr" { - name = "${format(var.chamber_parameter_name, local.chamber_service, "kops_non_masquerade_cidr")}" - value = "${var.kops_non_masquerade_cidr}" + name = format(var.chamber_parameter_name, local.chamber_service, "kops_non_masquerade_cidr") + value = var.kops_non_masquerade_cidr description = "The CIDR range for Pod IPs" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "kops_availability_zones" { - name = "${format(var.chamber_parameter_name, local.chamber_service, "kops_availability_zones")}" - value = "${join(",", local.availability_zones)}" + name = format(var.chamber_parameter_name, local.chamber_service, "kops_availability_zones") + value = join(",", local.availability_zones) description = "Kops availability zones in which cluster will be provisioned" type = "String" overwrite = "true" diff --git a/deprecated/aws/kops/outputs.tf b/deprecated/aws/kops/outputs.tf index 3674c4fbe..79cc24170 100644 --- a/deprecated/aws/kops/outputs.tf +++ b/deprecated/aws/kops/outputs.tf @@ -1,67 +1,67 @@ output "parent_zone_id" { - value = "${module.kops_state_backend.parent_zone_id}" + value = module.kops_state_backend.parent_zone_id } output "parent_zone_name" { - value = "${module.kops_state_backend.parent_zone_name}" + value = module.kops_state_backend.parent_zone_name } output "zone_id" { - value = "${module.kops_state_backend.zone_id}" + value = module.kops_state_backend.zone_id } output "zone_name" { - value = "${module.kops_state_backend.zone_name}" + value = module.kops_state_backend.zone_name } output "bucket_name" { - value = "${module.kops_state_backend.bucket_name}" + value = module.kops_state_backend.bucket_name } output "bucket_region" { - value = "${module.kops_state_backend.bucket_region}" + value = module.kops_state_backend.bucket_region } output "bucket_domain_name" { - value = "${module.kops_state_backend.bucket_domain_name}" + value = module.kops_state_backend.bucket_domain_name } output "bucket_id" { - value = "${module.kops_state_backend.bucket_id}" + value = module.kops_state_backend.bucket_id } output "bucket_arn" { - value = "${module.kops_state_backend.bucket_arn}" + value = module.kops_state_backend.bucket_arn } output "ssh_public_key" { - value = "${module.ssh_key_pair.public_key}" + value = module.ssh_key_pair.public_key } output "availability_zones" { - value = "${join(",", local.availability_zones)}" + value = join(",", local.availability_zones) } output 
"kops_shared_vpc_id" { - value = "${join("", aws_ssm_parameter.kops_shared_vpc_id.*.value)}" + value = join("", aws_ssm_parameter.kops_shared_vpc_id.*.value) } output "kops_shared_nat_gateways" { - value = "${join("", aws_ssm_parameter.kops_shared_nat_gateways.*.value)}" + value = join("", aws_ssm_parameter.kops_shared_nat_gateways.*.value) } output "kops_shared_utility_subnet_ids" { - value = "${join("", aws_ssm_parameter.kops_shared_utility_subnet_ids.*.value)}" + value = join("", aws_ssm_parameter.kops_shared_utility_subnet_ids.*.value) } output "kops_shared_private_subnet_ids" { - value = "${join("", aws_ssm_parameter.kops_shared_private_subnet_ids.*.value)}" + value = join("", aws_ssm_parameter.kops_shared_private_subnet_ids.*.value) } output "private_subnets" { - value = "${local.private_subnet_cidrs}" + value = local.private_subnet_cidrs } output "utility_subnets" { - value = "${local.utility_subnet_cidrs}" + value = local.utility_subnet_cidrs } diff --git a/deprecated/aws/kops/subnets/main.tf b/deprecated/aws/kops/subnets/main.tf index 66b6ef7cf..7d7d5fea5 100644 --- a/deprecated/aws/kops/subnets/main.tf +++ b/deprecated/aws/kops/subnets/main.tf @@ -1,7 +1,7 @@ # Read more: data "null_data_source" "subnets" { - count = "${var.subnet_count}" + count = var.subnet_count inputs = { cidr = "${cidrsubnet(var.iprange, var.newbits, var.netnum + count.index)}" diff --git a/deprecated/aws/kops/subnets/outputs.tf b/deprecated/aws/kops/subnets/outputs.tf index 0c731acc5..fc92a19f5 100644 --- a/deprecated/aws/kops/subnets/outputs.tf +++ b/deprecated/aws/kops/subnets/outputs.tf @@ -1,7 +1,7 @@ output "iprange" { - value = "${var.iprange}" + value = var.iprange } output "cidrs" { - value = "${data.null_data_source.subnets.*.outputs.cidr}" + value = data.null_data_source.subnets.*.outputs.cidr } diff --git a/deprecated/aws/kops/variables.tf b/deprecated/aws/kops/variables.tf index 5b4a18d48..1281f713c 100644 --- a/deprecated/aws/kops/variables.tf +++ b/deprecated/aws/kops/variables.tf @@ -1,55 +1,55 @@ variable "aws_assume_role_arn" { - type = "string" + type = string } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } variable "name" { - type = "string" + type = string description = "Name (e.g. `kops`)" default = "kops" } variable "region" { - type = "string" + type = string default = "" - description = "AWS region for resources. Can be overriden by `resource_region` and `state_store_region`" + description = "AWS region for resources. Can be overridden by `resource_region` and `state_store_region`" } variable "state_store_region" { - type = "string" + type = string default = "" description = "Region where to create the S3 bucket for the kops state store. Defaults to `var.region`" } variable "resource_region" { - type = "string" + type = string default = "" description = "Region where to create region-specific resources. Defaults to `var.region`" } variable "create_state_store_bucket" { - type = "string" + type = string default = "true" description = "Set to `false` to use existing S3 bucket (e.g. from another region)" } variable "cluster_name_prefix" { - type = "string" + type = string default = "" description = "Prefix to add before parent DNS zone name to identify this cluster, e.g. `us-east-1`. 
Defaults to `var.resource_region`" } variable "availability_zones" { - type = "list" + type = list(string) description = "List of availability zones in which to provision the cluster (should be an odd number to avoid split-brain)." default = [] } @@ -60,48 +60,48 @@ variable "availability_zone_count" { } variable "zone_name" { - type = "string" + type = string description = "DNS zone name" } variable "domain_enabled" { - type = "string" + type = string description = "Enable DNS Zone creation for kops" default = "true" } variable "force_destroy" { - type = "string" + type = string description = "A boolean that indicates all objects should be deleted from the bucket so that the bucket can be destroyed without errors. These objects are not recoverable." default = "false" } variable "ssh_key_algorithm" { - type = "string" + type = string default = "RSA" description = "SSH key algorithm to use. Currently-supported values are 'RSA' and 'ECDSA'" } variable "ssh_key_rsa_bits" { - type = "string" + type = string description = "When ssh_key_algorithm is 'RSA', the size of the generated RSA key in bits" default = "4096" } variable "ssh_key_ecdsa_curve" { - type = "string" + type = string description = "When ssh_key_algorithm is 'ECDSA', the name of the elliptic curve to use. May be any one of 'P256', 'P384' or P521'" default = "P521" } variable "kops_attribute" { - type = "string" + type = string description = "Additional attribute to kops state bucket" default = "state" } variable "complete_zone_name" { - type = "string" + type = string description = "Region or any classifier prefixed to zone name" default = "$${name}.$${parent_zone_name}" } diff --git a/deprecated/aws/opsgenie/providers.tf b/deprecated/aws/opsgenie/providers.tf old mode 100755 new mode 100644 diff --git a/deprecated/aws/opsgenie/versions.tf b/deprecated/aws/opsgenie/versions.tf old mode 100755 new mode 100644 index 93d96f5e0..369383021 --- a/deprecated/aws/opsgenie/versions.tf +++ b/deprecated/aws/opsgenie/versions.tf @@ -7,7 +7,7 @@ terraform { version = ">= 2.0" } template = { - source = "hashicorp/template" + source = "cloudposse/template" version = ">= 2.0" } local = { diff --git a/deprecated/aws/organization/main.tf b/deprecated/aws/organization/main.tf index d02f4cbcd..e532c95fb 100644 --- a/deprecated/aws/organization/main.tf +++ b/deprecated/aws/organization/main.tf @@ -8,41 +8,41 @@ terraform { } variable "aws_assume_role_arn" { - type = "string" + type = string } variable "organization_feature_set" { - type = "string" + type = string default = "ALL" description = "`ALL` (default) or `CONSOLIDATED_BILLING`" } provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } resource "aws_organizations_organization" "default" { - feature_set = "${var.organization_feature_set}" + feature_set = var.organization_feature_set } output "organization_id" { - value = "${aws_organizations_organization.default.id}" + value = aws_organizations_organization.default.id } output "organization_arn" { - value = "${aws_organizations_organization.default.arn}" + value = aws_organizations_organization.default.arn } output "organization_master_account_id" { - value = "${aws_organizations_organization.default.master_account_id}" + value = aws_organizations_organization.default.master_account_id } output "organization_master_account_arn" { - value = "${aws_organizations_organization.default.master_account_arn}" + value = aws_organizations_organization.default.master_account_arn } output 
"organization_master_account_email" { - value = "${aws_organizations_organization.default.master_account_email}" + value = aws_organizations_organization.default.master_account_email } diff --git a/deprecated/aws/root-dns/main.tf b/deprecated/aws/root-dns/main.tf index bdccc4ab8..60a335b49 100644 --- a/deprecated/aws/root-dns/main.tf +++ b/deprecated/aws/root-dns/main.tf @@ -5,19 +5,19 @@ terraform { } variable "aws_assume_role_arn" { - type = "string" + type = string } variable "namespace" {} variable "accounts_enabled" { - type = "list" + type = list(string) description = "Accounts to enable" default = ["dev", "staging", "prod", "testing", "audit"] } provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } diff --git a/deprecated/aws/root-dns/ns/main.tf b/deprecated/aws/root-dns/ns/main.tf index 4b61edf80..b5616833e 100644 --- a/deprecated/aws/root-dns/ns/main.tf +++ b/deprecated/aws/root-dns/ns/main.tf @@ -1,52 +1,52 @@ locals { - enabled = "${contains(var.accounts_enabled, var.stage) == true}" - account = "${length(var.account) > 0 ? var.account : var.stage}" + enabled = contains(var.accounts_enabled, var.stage) == true + account = length(var.account) > 0 ? var.account : var.stage } module "label" { source = "git::https://github.com/cloudposse/terraform-null-label.git?ref=tags/0.5.4" - enabled = "${local.enabled ? "true" : "false"}" - namespace = "${var.namespace}" - stage = "${local.account}" - name = "${var.name}" - delimiter = "${var.delimiter}" - attributes = "${var.attributes}" - tags = "${var.tags}" + enabled = local.enabled ? "true" : "false" + namespace = var.namespace + stage = local.account + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags } # Fetch the OrganizationAccountAccessRole ARNs from SSM module "organization_account_access_role_arn" { - enabled = "${local.enabled ? "true" : "false"}" + enabled = local.enabled ? "true" : "false" source = "git::https://github.com/cloudposse/terraform-aws-ssm-parameter-store?ref=tags/0.1.5" parameter_read = ["/${var.namespace}/${local.account}/organization_account_access_role"] } locals { - role_arn_values = "${module.organization_account_access_role_arn.values}" + role_arn_values = module.organization_account_access_role_arn.values } data "terraform_remote_state" "stage" { - count = "${local.enabled ? 1 : 0}" + count = local.enabled ? 1 : 0 backend = "s3" # This assumes stage is using a `terraform-aws-tfstate-backend` # https://github.com/cloudposse/terraform-aws-tfstate-backend config { - role_arn = "${local.role_arn_values[0]}" - bucket = "${module.label.id}" - key = "${var.key}" + role_arn = local.role_arn_values[0] + bucket = module.label.id + key = var.key } } locals { - name_servers = "${flatten(data.terraform_remote_state.stage.*.name_servers)}" + name_servers = flatten(data.terraform_remote_state.stage.*.name_servers) } resource "aws_route53_record" "dns_zone_ns" { - count = "${local.enabled ? 1 : 0}" - zone_id = "${var.zone_id}" - name = "${var.stage}" + count = local.enabled ? 
1 : 0 + zone_id = var.zone_id + name = var.stage type = "NS" - ttl = "${var.ttl}" + ttl = var.ttl records = ["${local.name_servers}"] } diff --git a/deprecated/aws/root-dns/ns/outputs.tf b/deprecated/aws/root-dns/ns/outputs.tf index c0043cb74..4cf060185 100644 --- a/deprecated/aws/root-dns/ns/outputs.tf +++ b/deprecated/aws/root-dns/ns/outputs.tf @@ -1,9 +1,9 @@ output "stage" { description = "Name of the subaccount corresponding to the name servers" - value = "${var.stage}" + value = var.stage } output "name_servers" { description = "Name servers for the account's delegated DNS zone" - value = "${local.name_servers}" + value = local.name_servers } diff --git a/deprecated/aws/root-dns/ns/variables.tf b/deprecated/aws/root-dns/ns/variables.tf index 94002a645..3021d44ab 100644 --- a/deprecated/aws/root-dns/ns/variables.tf +++ b/deprecated/aws/root-dns/ns/variables.tf @@ -1,39 +1,39 @@ variable "accounts_enabled" { - type = "list" + type = list(string) description = "Accounts to enable" default = ["dev", "staging", "prod", "testing", "audit"] } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `eg` or `example`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } variable "name" { - type = "string" + type = string default = "terraform" description = "Name (e.g. `app` or `cluster`)" } variable "delimiter" { - type = "string" + type = string default = "-" description = "Delimiter to be used between `namespace`, `stage`, `name`, and `attributes`" } variable "attributes" { - type = "list" + type = list(string) default = ["state"] description = "Additional attributes (e.g. `state`)" } variable "tags" { - type = "map" + type = map(string) default = {} description = "Additional tags (e.g. map(`BusinessUnit`,`XYZ`)" } @@ -54,6 +54,6 @@ variable "key" { variable "account" { description = "If set, then it will be used instead of 'stage' to assume role. 
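The `ns` module invoked by the `parent-*-ns.tf` files below reduces to publishing a child account's name servers as an NS record in the parent zone. A rough 0.12-style sketch of that delegation record; the names and placeholder values are illustrative rather than the module's exact interface, and in the real module the name servers come from the child account's remote state:

```
variable "parent_zone_id" {
  type        = string
  description = "Hypothetical parent Route53 zone ID"
}

locals {
  # Placeholder values; the module flattens these out of terraform_remote_state
  name_servers = ["ns-1.example.net", "ns-2.example.org"]
}

resource "aws_route53_record" "delegation" {
  zone_id = var.parent_zone_id
  name    = "dev" # the subaccount stage becomes the delegated subdomain
  type    = "NS"
  ttl     = 30
  records = local.name_servers
}
```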
This is useful when you need another domain for existing stage" - type = "string" + type = string default = "" } diff --git a/deprecated/aws/root-dns/parent-alerts-ns.tf b/deprecated/aws/root-dns/parent-alerts-ns.tf index 93ccb28cf..413aa2798 100644 --- a/deprecated/aws/root-dns/parent-alerts-ns.tf +++ b/deprecated/aws/root-dns/parent-alerts-ns.tf @@ -1,11 +1,11 @@ module "alerts" { source = "ns" - accounts_enabled = "${var.accounts_enabled}" - namespace = "${var.namespace}" + accounts_enabled = var.accounts_enabled + namespace = var.namespace stage = "alerts" - zone_id = "${aws_route53_zone.parent_dns_zone.zone_id}" + zone_id = aws_route53_zone.parent_dns_zone.zone_id } output "alerts_name_servers" { - value = "${module.alerts.name_servers}" + value = module.alerts.name_servers } diff --git a/deprecated/aws/root-dns/parent-audit-ns.tf b/deprecated/aws/root-dns/parent-audit-ns.tf index b993a1bfa..3c88f495c 100644 --- a/deprecated/aws/root-dns/parent-audit-ns.tf +++ b/deprecated/aws/root-dns/parent-audit-ns.tf @@ -1,11 +1,11 @@ module "audit" { source = "ns" - accounts_enabled = "${var.accounts_enabled}" - namespace = "${var.namespace}" + accounts_enabled = var.accounts_enabled + namespace = var.namespace stage = "audit" - zone_id = "${aws_route53_zone.parent_dns_zone.zone_id}" + zone_id = aws_route53_zone.parent_dns_zone.zone_id } output "audit_name_servers" { - value = "${module.audit.name_servers}" + value = module.audit.name_servers } diff --git a/deprecated/aws/root-dns/parent-corp-ns.tf b/deprecated/aws/root-dns/parent-corp-ns.tf index 14e8caa27..835ff9b99 100644 --- a/deprecated/aws/root-dns/parent-corp-ns.tf +++ b/deprecated/aws/root-dns/parent-corp-ns.tf @@ -1,11 +1,11 @@ module "corp" { source = "ns" - accounts_enabled = "${var.accounts_enabled}" - namespace = "${var.namespace}" + accounts_enabled = var.accounts_enabled + namespace = var.namespace stage = "corp" - zone_id = "${aws_route53_zone.parent_dns_zone.zone_id}" + zone_id = aws_route53_zone.parent_dns_zone.zone_id } output "corp_name_servers" { - value = "${module.corp.name_servers}" + value = module.corp.name_servers } diff --git a/deprecated/aws/root-dns/parent-data-ns.tf b/deprecated/aws/root-dns/parent-data-ns.tf index 53ca6e730..a5933c5e5 100644 --- a/deprecated/aws/root-dns/parent-data-ns.tf +++ b/deprecated/aws/root-dns/parent-data-ns.tf @@ -1,11 +1,11 @@ module "data" { source = "ns" - accounts_enabled = "${var.accounts_enabled}" - namespace = "${var.namespace}" + accounts_enabled = var.accounts_enabled + namespace = var.namespace stage = "data" - zone_id = "${aws_route53_zone.parent_dns_zone.zone_id}" + zone_id = aws_route53_zone.parent_dns_zone.zone_id } output "data_name_servers" { - value = "${module.data.name_servers}" + value = module.data.name_servers } diff --git a/deprecated/aws/root-dns/parent-dev-ns.tf b/deprecated/aws/root-dns/parent-dev-ns.tf index 40564113c..b92fccab5 100644 --- a/deprecated/aws/root-dns/parent-dev-ns.tf +++ b/deprecated/aws/root-dns/parent-dev-ns.tf @@ -1,11 +1,11 @@ module "dev" { source = "ns" - accounts_enabled = "${var.accounts_enabled}" - namespace = "${var.namespace}" + accounts_enabled = var.accounts_enabled + namespace = var.namespace stage = "dev" - zone_id = "${aws_route53_zone.parent_dns_zone.zone_id}" + zone_id = aws_route53_zone.parent_dns_zone.zone_id } output "dev_name_servers" { - value = "${module.dev.name_servers}" + value = module.dev.name_servers } diff --git a/deprecated/aws/root-dns/parent-identity-ns.tf b/deprecated/aws/root-dns/parent-identity-ns.tf index 
fc6e31148..c7a1f3058 100644 --- a/deprecated/aws/root-dns/parent-identity-ns.tf +++ b/deprecated/aws/root-dns/parent-identity-ns.tf @@ -1,11 +1,11 @@ module "identity" { source = "ns" - accounts_enabled = "${var.accounts_enabled}" - namespace = "${var.namespace}" + accounts_enabled = var.accounts_enabled + namespace = var.namespace stage = "identity" - zone_id = "${aws_route53_zone.parent_dns_zone.zone_id}" + zone_id = aws_route53_zone.parent_dns_zone.zone_id } output "identity_name_servers" { - value = "${module.identity.name_servers}" + value = module.identity.name_servers } diff --git a/deprecated/aws/root-dns/parent-local-ns.tf b/deprecated/aws/root-dns/parent-local-ns.tf index 80fa3c24d..074282959 100644 --- a/deprecated/aws/root-dns/parent-local-ns.tf +++ b/deprecated/aws/root-dns/parent-local-ns.tf @@ -1,5 +1,5 @@ resource "aws_route53_record" "local_dns_name" { - zone_id = "${aws_route53_zone.parent_dns_zone.zone_id}" + zone_id = aws_route53_zone.parent_dns_zone.zone_id name = "local" type = "A" ttl = "30" @@ -7,7 +7,7 @@ resource "aws_route53_record" "local_dns_name" { } resource "aws_route53_record" "local_dns_wildcard" { - zone_id = "${aws_route53_zone.parent_dns_zone.zone_id}" + zone_id = aws_route53_zone.parent_dns_zone.zone_id name = "*.local" type = "A" ttl = "30" diff --git a/deprecated/aws/root-dns/parent-prod-ns.tf b/deprecated/aws/root-dns/parent-prod-ns.tf index f09a15418..09786a4bd 100644 --- a/deprecated/aws/root-dns/parent-prod-ns.tf +++ b/deprecated/aws/root-dns/parent-prod-ns.tf @@ -1,11 +1,11 @@ module "prod" { source = "ns" - accounts_enabled = "${var.accounts_enabled}" - namespace = "${var.namespace}" + accounts_enabled = var.accounts_enabled + namespace = var.namespace stage = "prod" - zone_id = "${aws_route53_zone.parent_dns_zone.zone_id}" + zone_id = aws_route53_zone.parent_dns_zone.zone_id } output "prod_name_servers" { - value = "${module.prod.name_servers}" + value = module.prod.name_servers } diff --git a/deprecated/aws/root-dns/parent-qa-ns.tf b/deprecated/aws/root-dns/parent-qa-ns.tf index 4af999b3a..7e4ced51d 100644 --- a/deprecated/aws/root-dns/parent-qa-ns.tf +++ b/deprecated/aws/root-dns/parent-qa-ns.tf @@ -1,13 +1,13 @@ module "qa" { source = "ns" - accounts_enabled = "${var.accounts_enabled}" - namespace = "${var.namespace}" + accounts_enabled = var.accounts_enabled + namespace = var.namespace stage = "qa" - zone_id = "${aws_route53_zone.parent_dns_zone.zone_id}" + zone_id = aws_route53_zone.parent_dns_zone.zone_id account = "staging" key = "qa-dns/terraform.tfstate" } output "qa_name_servers" { - value = "${module.qa.name_servers}" + value = module.qa.name_servers } diff --git a/deprecated/aws/root-dns/parent-security-ns.tf b/deprecated/aws/root-dns/parent-security-ns.tf index c2f29892d..5b4021de8 100644 --- a/deprecated/aws/root-dns/parent-security-ns.tf +++ b/deprecated/aws/root-dns/parent-security-ns.tf @@ -1,11 +1,11 @@ module "security" { source = "ns" - accounts_enabled = "${var.accounts_enabled}" - namespace = "${var.namespace}" + accounts_enabled = var.accounts_enabled + namespace = var.namespace stage = "security" - zone_id = "${aws_route53_zone.parent_dns_zone.zone_id}" + zone_id = aws_route53_zone.parent_dns_zone.zone_id } output "security_name_servers" { - value = "${module.security.name_servers}" + value = module.security.name_servers } diff --git a/deprecated/aws/root-dns/parent-staging-ns.tf b/deprecated/aws/root-dns/parent-staging-ns.tf index 025c2ec6d..e97ca60a8 100644 --- a/deprecated/aws/root-dns/parent-staging-ns.tf +++ 
b/deprecated/aws/root-dns/parent-staging-ns.tf @@ -1,11 +1,11 @@ module "staging" { source = "ns" - accounts_enabled = "${var.accounts_enabled}" - namespace = "${var.namespace}" + accounts_enabled = var.accounts_enabled + namespace = var.namespace stage = "staging" - zone_id = "${aws_route53_zone.parent_dns_zone.zone_id}" + zone_id = aws_route53_zone.parent_dns_zone.zone_id } output "staging_name_servers" { - value = "${module.staging.name_servers}" + value = module.staging.name_servers } diff --git a/deprecated/aws/root-dns/parent-testing-ns.tf b/deprecated/aws/root-dns/parent-testing-ns.tf index 304eeb4c9..9b83b5492 100644 --- a/deprecated/aws/root-dns/parent-testing-ns.tf +++ b/deprecated/aws/root-dns/parent-testing-ns.tf @@ -1,11 +1,11 @@ module "testing" { source = "ns" - accounts_enabled = "${var.accounts_enabled}" - namespace = "${var.namespace}" + accounts_enabled = var.accounts_enabled + namespace = var.namespace stage = "testing" - zone_id = "${aws_route53_zone.parent_dns_zone.zone_id}" + zone_id = aws_route53_zone.parent_dns_zone.zone_id } output "testing_name_servers" { - value = "${module.testing.name_servers}" + value = module.testing.name_servers } diff --git a/deprecated/aws/root-dns/parent.tf b/deprecated/aws/root-dns/parent.tf index 332b51ebe..af1245374 100644 --- a/deprecated/aws/root-dns/parent.tf +++ b/deprecated/aws/root-dns/parent.tf @@ -1,17 +1,17 @@ variable "parent_domain_name" { - type = "string" + type = string description = "Parent domain name" } resource "aws_route53_zone" "parent_dns_zone" { - name = "${var.parent_domain_name}" + name = var.parent_domain_name comment = "Parent domain name" } resource "aws_route53_record" "parent_dns_zone_soa" { allow_overwrite = true - zone_id = "${aws_route53_zone.parent_dns_zone.id}" - name = "${aws_route53_zone.parent_dns_zone.name}" + zone_id = aws_route53_zone.parent_dns_zone.id + name = aws_route53_zone.parent_dns_zone.name type = "SOA" ttl = "60" @@ -21,9 +21,9 @@ resource "aws_route53_record" "parent_dns_zone_soa" { } output "parent_zone_id" { - value = "${aws_route53_zone.parent_dns_zone.zone_id}" + value = aws_route53_zone.parent_dns_zone.zone_id } output "parent_name_servers" { - value = "${aws_route53_zone.parent_dns_zone.name_servers}" + value = aws_route53_zone.parent_dns_zone.name_servers } diff --git a/deprecated/aws/root-dns/root.tf b/deprecated/aws/root-dns/root.tf index 022d76dfc..e7d9ab3c5 100644 --- a/deprecated/aws/root-dns/root.tf +++ b/deprecated/aws/root-dns/root.tf @@ -1,17 +1,17 @@ variable "root_domain_name" { - type = "string" + type = string description = "Root domain name" } resource "aws_route53_zone" "root_dns_zone" { - name = "${var.root_domain_name}" + name = var.root_domain_name comment = "DNS Zone for Root Account" } resource "aws_route53_record" "root_dns_zone_soa" { allow_overwrite = true - zone_id = "${aws_route53_zone.root_dns_zone.id}" - name = "${aws_route53_zone.root_dns_zone.name}" + zone_id = aws_route53_zone.root_dns_zone.id + name = aws_route53_zone.root_dns_zone.name type = "SOA" ttl = "60" @@ -21,7 +21,7 @@ resource "aws_route53_record" "root_dns_zone_soa" { } resource "aws_route53_record" "root_dns_zone_ns" { - zone_id = "${aws_route53_zone.parent_dns_zone.zone_id}" + zone_id = aws_route53_zone.parent_dns_zone.zone_id name = "root" type = "NS" ttl = "30" @@ -29,9 +29,9 @@ resource "aws_route53_record" "root_dns_zone_ns" { } output "root_zone_id" { - value = "${aws_route53_zone.root_dns_zone.zone_id}" + value = aws_route53_zone.root_dns_zone.zone_id } output 
"root_name_servers" { - value = "${aws_route53_zone.root_dns_zone.name_servers}" + value = aws_route53_zone.root_dns_zone.name_servers } diff --git a/deprecated/aws/root-iam/README.md b/deprecated/aws/root-iam/README.md index 623838e72..f021fca9b 100644 --- a/deprecated/aws/root-iam/README.md +++ b/deprecated/aws/root-iam/README.md @@ -1,5 +1,5 @@ # root-iam -This module is responsible for setting up the access groups in the root account. +This module is responsible for setting up the access groups in the root account. If provisioning this during a cold-start process, make sure you have `TF_VAR_aws_assume_role_arn` set to nil. diff --git a/deprecated/aws/root-iam/main.tf b/deprecated/aws/root-iam/main.tf index 44acbb528..dc8b18ba6 100644 --- a/deprecated/aws/root-iam/main.tf +++ b/deprecated/aws/root-iam/main.tf @@ -6,6 +6,6 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } diff --git a/deprecated/aws/root-iam/root.tf b/deprecated/aws/root-iam/root.tf index 0ee96d510..03c2a5fdb 100644 --- a/deprecated/aws/root-iam/root.tf +++ b/deprecated/aws/root-iam/root.tf @@ -1,11 +1,11 @@ variable "root_account_admin_user_names" { - type = "list" + type = list(string) description = "IAM user names to grant admin access to Root account" default = [] } variable "root_account_readonly_user_names" { - type = "list" + type = list(string) description = "IAM user names to grant readonly access to Root account" default = [] } @@ -13,7 +13,7 @@ variable "root_account_readonly_user_names" { # Provision group access to root account with MFA module "organization_access_group_root" { source = "git::https://github.com/cloudposse/terraform-aws-iam-assumed-roles.git?ref=tags/0.6.0" - namespace = "${var.namespace}" + namespace = var.namespace stage = "root" admin_name = "admin" readonly_name = "readonly" @@ -43,19 +43,19 @@ module "organization_access_group_ssm_root" { } output "admin_group" { - value = "${module.organization_access_group_root.group_admin_name}" + value = module.organization_access_group_root.group_admin_name } output "admin_switchrole_url" { description = "URL to the IAM console to switch to the admin role" - value = "${module.organization_access_group_root.switchrole_admin_url}" + value = module.organization_access_group_root.switchrole_admin_url } output "readonly_group" { - value = "${module.organization_access_group_root.group_readonly_name}" + value = module.organization_access_group_root.group_readonly_name } output "readonly_switchrole_url" { description = "URL to the IAM console to switch to the readonly role" - value = "${module.organization_access_group_root.switchrole_readonly_url}" + value = module.organization_access_group_root.switchrole_readonly_url } diff --git a/deprecated/aws/root-iam/variables.tf b/deprecated/aws/root-iam/variables.tf index a80b06405..ec6c40496 100644 --- a/deprecated/aws/root-iam/variables.tf +++ b/deprecated/aws/root-iam/variables.tf @@ -1,13 +1,13 @@ variable "aws_assume_role_arn" { - type = "string" + type = string } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. 
`prod`, `dev`, `staging`)" } diff --git a/deprecated/aws/security-baseline/main.tf b/deprecated/aws/security-baseline/main.tf index 20c3d36c8..21bb140eb 100644 --- a/deprecated/aws/security-baseline/main.tf +++ b/deprecated/aws/security-baseline/main.tf @@ -6,7 +6,7 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } @@ -19,7 +19,7 @@ data "aws_vpc" "default" { } resource "aws_default_security_group" "default" { - vpc_id = "${data.aws_vpc.default.id}" + vpc_id = data.aws_vpc.default.id tags = { Name = "Default Security Group" @@ -30,15 +30,15 @@ module "flow_logs" { source = "git::https://github.com/cloudposse/terraform-aws-vpc-flow-logs-s3-bucket.git?ref=tags/0.1.0" name = "vpc" - namespace = "${var.namespace}" - stage = "${var.stage}" - tags = "${var.tags}" - attributes = "${concat(list("default"), var.attributes, list("flow-logs"))}" - delimiter = "${var.delimiter}" + namespace = var.namespace + stage = var.stage + tags = var.tags + attributes = concat(list("default"), var.attributes, list("flow-logs")) + delimiter = var.delimiter - region = "${var.region}" + region = var.region - enabled = "${var.flow_logs_enabled}" + enabled = var.flow_logs_enabled - vpc_id = "${data.aws_vpc.default.id}" + vpc_id = data.aws_vpc.default.id } diff --git a/deprecated/aws/security-baseline/output.tf b/deprecated/aws/security-baseline/output.tf index a1d44c807..730ed3346 100644 --- a/deprecated/aws/security-baseline/output.tf +++ b/deprecated/aws/security-baseline/output.tf @@ -1,44 +1,44 @@ output "flow_logs_kms_key_arn" { - value = "${module.flow_logs.kms_key_arn}" + value = module.flow_logs.kms_key_arn description = "Flow logs KMS Key ARN" } output "flow_logs_kms_key_id" { - value = "${module.flow_logs.kms_key_id}" + value = module.flow_logs.kms_key_id description = "Flow logs KMS Key ID" } output "flow_logs_kms_alias_arn" { - value = "${module.flow_logs.kms_alias_arn}" + value = module.flow_logs.kms_alias_arn description = "Flow logs KMS Alias ARN" } output "flow_logs_kms_alias_name" { - value = "${module.flow_logs.kms_alias_name}" + value = module.flow_logs.kms_alias_name description = "Flow logs KMS Alias name" } output "flow_logs_bucket_domain_name" { - value = "${module.flow_logs.bucket_domain_name}" + value = module.flow_logs.bucket_domain_name description = "Flow logs FQDN of bucket" } output "flow_logs_bucket_id" { - value = "${module.flow_logs.bucket_id}" + value = module.flow_logs.bucket_id description = "Flow logs bucket Name (aka ID)" } output "flow_logs_bucket_arn" { - value = "${module.flow_logs.bucket_arn}" + value = module.flow_logs.bucket_arn description = "Flow logs bucket ARN" } output "flow_logs_bucket_prefix" { - value = "${module.flow_logs.bucket_prefix}" + value = module.flow_logs.bucket_prefix description = "Flow logs bucket prefix configured for lifecycle rules" } output "flow_logs_id" { - value = "${module.flow_logs.id}" + value = module.flow_logs.id description = "Flow logs ID" } diff --git a/deprecated/aws/security-baseline/variables.tf b/deprecated/aws/security-baseline/variables.tf index d05414de3..6b4b46d89 100644 --- a/deprecated/aws/security-baseline/variables.tf +++ b/deprecated/aws/security-baseline/variables.tf @@ -1,5 +1,5 @@ variable "aws_assume_role_arn" { - type = "string" + type = string } variable "enabled" { @@ -8,29 +8,29 @@ variable "enabled" { } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. 
`cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } variable "delimiter" { - type = "string" + type = string default = "-" description = "Delimiter between `name`, `namespace`, `stage` and `attributes`" } variable "attributes" { - type = "list" + type = list(string) description = "Additional attributes (_e.g._ \"1\")" default = [] } variable "tags" { - type = "map" + type = map(string) description = "Additional tags (_e.g._ map(\"BusinessUnit\",\"ABC\")" default = {} } diff --git a/deprecated/aws/sentry/aurora-postgres.tf b/deprecated/aws/sentry/aurora-postgres.tf index e55324833..79685bea9 100644 --- a/deprecated/aws/sentry/aurora-postgres.tf +++ b/deprecated/aws/sentry/aurora-postgres.tf @@ -4,7 +4,7 @@ variable "postgres_name" { default = "postgres" } -# Don't use `admin` +# Don't use `admin` # Read more: # ("MasterUsername admin cannot be used as it is a reserved word used by the engine") variable "postgres_admin_user" { @@ -30,13 +30,13 @@ variable "postgres_db_name" { variable "aurora_postgres_engine_version" { type = string - description = "Database Engine Version for Aurora PostgeSQL" + description = "Database Engine Version for Aurora PostgreSQL" default = "9.6.12" } variable "aurora_postgres_cluster_family" { type = string - description = "Database Engine Version for Aurora PostgeSQL" + description = "Database Engine Version for Aurora PostgreSQL" default = "9.6.12" } diff --git a/deprecated/aws/sentry/variables.tf b/deprecated/aws/sentry/variables.tf index f3a94cf6f..ab3015b65 100644 --- a/deprecated/aws/sentry/variables.tf +++ b/deprecated/aws/sentry/variables.tf @@ -28,4 +28,3 @@ variable "chamber_service" { default = "kops" description = "`chamber` service name. See [chamber usage](https://github.com/segmentio/chamber#usage) for more details" } - diff --git a/deprecated/aws/sentry/versions.tf b/deprecated/aws/sentry/versions.tf index 2bdc83477..7a4900af9 100644 --- a/deprecated/aws/sentry/versions.tf +++ b/deprecated/aws/sentry/versions.tf @@ -6,4 +6,3 @@ terraform { random = "~> 2.2" } } - diff --git a/deprecated/aws/ses/Makefile b/deprecated/aws/ses/Makefile index a233f0377..ebccc7c29 100644 --- a/deprecated/aws/ses/Makefile +++ b/deprecated/aws/ses/Makefile @@ -15,4 +15,3 @@ apply console destroy graph plan output providers show: init ## Pass arguments through to terraform which do not require remote state get fmt validate version: terraform $@ - diff --git a/deprecated/aws/ses/emails.tf b/deprecated/aws/ses/emails.tf index 85ef9148c..19aad1cbd 100644 --- a/deprecated/aws/ses/emails.tf +++ b/deprecated/aws/ses/emails.tf @@ -3,7 +3,7 @@ variable "relay_email" { } variable "forward_emails" { - type = "map" + type = map(string) default = { "ops@example.com" = ["example@gmail.com"] diff --git a/deprecated/aws/ses/main.tf b/deprecated/aws/ses/main.tf index bfe46c5ac..cd18ea722 100644 --- a/deprecated/aws/ses/main.tf +++ b/deprecated/aws/ses/main.tf @@ -6,54 +6,54 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } - region = "${var.ses_region}" + region = var.ses_region } variable "aws_assume_role_arn" { - type = "string" + type = string } variable "ses_region" { - type = "string" + type = string description = "AWS Region the SES should reside in" default = "us-west-2" } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g.
`cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } variable "ses_name" { - type = "string" + type = string description = "Application or solution name (e.g. `app`)" default = "ses" } variable "parent_domain_name" { - type = "string" + type = string description = "Root domain name" } module "ses" { source = "git::https://github.com/cloudposse/terraform-aws-ses-lambda-forwarder.git?ref=tags/0.2.0" - namespace = "${var.namespace}" - name = "${var.ses_name}" - stage = "${var.stage}" + namespace = var.namespace + name = var.ses_name + stage = var.stage - region = "${var.ses_region}" + region = var.ses_region - relay_email = "${var.relay_email}" - domain = "${var.parent_domain_name}" + relay_email = var.relay_email + domain = var.parent_domain_name - forward_emails = "${var.forward_emails}" + forward_emails = var.forward_emails } diff --git a/deprecated/aws/slack-archive/main.tf b/deprecated/aws/slack-archive/main.tf index 5536ebfdc..ff1b923c1 100644 --- a/deprecated/aws/slack-archive/main.tf +++ b/deprecated/aws/slack-archive/main.tf @@ -5,38 +5,38 @@ terraform { } variable "aws_assume_role_arn" { - type = "string" + type = string description = "The Amazon Resource Name (ARN) of the role to assume." } variable "domain_name" { - type = "string" + type = string description = "Domain name for Slack Archive" } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `eg` or `cp`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } variable "region" { - type = "string" + type = string description = "AWS region" } variable "account_id" { - type = "string" + type = string description = "AWS account ID" } provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } @@ -45,14 +45,14 @@ provider "aws" { region = "us-east-1" assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } # https://www.terraform.io/docs/providers/aws/d/acm_certificate.html data "aws_acm_certificate" "acm_cloudfront_certificate" { provider = "aws.virginia" - domain = "${var.domain_name}" + domain = var.domain_name statuses = ["ISSUED"] types = ["AMAZON_ISSUED"] } @@ -65,19 +65,19 @@ locals { module "slack_archive_user" { source = "git::https://github.com/cloudposse/terraform-aws-iam-system-user.git?ref=tags/0.2.2" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${local.name}" + namespace = var.namespace + stage = var.stage + name = local.name } module "origin" { source = "git::https://github.com/cloudposse/terraform-aws-s3-website.git?ref=tags/0.5.2" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${local.name}" - hostname = "${local.cdn_domain}" - parent_zone_name = "${var.domain_name}" - region = "${var.region}" + namespace = var.namespace + stage = var.stage + name = local.name + hostname = local.cdn_domain + parent_zone_name = var.domain_name + region = var.region cors_allowed_headers = ["*"] cors_allowed_methods = ["GET"] cors_allowed_origins = ["*"] @@ -102,14 +102,14 @@ module "origin" { # CloudFront CDN fronting origin module "cdn" { source = "git::https://github.com/cloudposse/terraform-aws-cloudfront-cdn.git?ref=tags/0.4.0" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${local.name}" + namespace = var.namespace + stage = var.stage + name = local.name aliases = ["${local.cdn_domain}", 
"archive.sweetops.com"] - origin_domain_name = "${module.origin.s3_bucket_website_endpoint}" + origin_domain_name = module.origin.s3_bucket_website_endpoint origin_protocol_policy = "http-only" viewer_protocol_policy = "redirect-to-https" - parent_zone_name = "${var.domain_name}" + parent_zone_name = var.domain_name forward_cookies = "none" forward_headers = ["Origin", "Access-Control-Request-Headers", "Access-Control-Request-Method"] default_ttl = 60 @@ -120,5 +120,5 @@ module "cdn" { allowed_methods = ["GET", "HEAD", "OPTIONS"] price_class = "PriceClass_All" default_root_object = "index.html" - acm_certificate_arn = "${data.aws_acm_certificate.acm_cloudfront_certificate.arn}" + acm_certificate_arn = data.aws_acm_certificate.acm_cloudfront_certificate.arn } diff --git a/deprecated/aws/slack-archive/outputs.tf b/deprecated/aws/slack-archive/outputs.tf index 2fc48a995..0dc4f9bc3 100644 --- a/deprecated/aws/slack-archive/outputs.tf +++ b/deprecated/aws/slack-archive/outputs.tf @@ -1,82 +1,82 @@ output "slack_archive_user_name" { - value = "${module.slack_archive_user.user_name}" + value = module.slack_archive_user.user_name description = "Normalized IAM user name" } output "slack_archive_user_arn" { - value = "${module.slack_archive_user.user_arn}" + value = module.slack_archive_user.user_arn description = "The ARN assigned by AWS for the user" } output "slack_archive_user_unique_id" { - value = "${module.slack_archive_user.user_unique_id}" + value = module.slack_archive_user.user_unique_id description = "The user unique ID assigned by AWS" } output "slack_archive_user_access_key_id" { - value = "${module.slack_archive_user.access_key_id}" + value = module.slack_archive_user.access_key_id description = "The access key ID" sensitive = true } output "slack_archive_user_secret_access_key" { - value = "${module.slack_archive_user.secret_access_key}" + value = module.slack_archive_user.secret_access_key description = "The secret access key. 
This will be written to the state file in plain-text" sensitive = true } output "slack_archive_s3_bucket_name" { - value = "${module.origin.s3_bucket_name}" + value = module.origin.s3_bucket_name } output "slack_archive_s3_bucket_domain_name" { - value = "${module.origin.s3_bucket_domain_name}" + value = module.origin.s3_bucket_domain_name } output "slack_archive_s3_bucket_arn" { - value = "${module.origin.s3_bucket_arn}" + value = module.origin.s3_bucket_arn } output "slack_archive_s3_bucket_website_endpoint" { - value = "${module.origin.s3_bucket_website_endpoint}" + value = module.origin.s3_bucket_website_endpoint } output "slack_archive_s3_bucket_website_domain" { - value = "${module.origin.s3_bucket_website_domain}" + value = module.origin.s3_bucket_website_domain } output "slack_archive_s3_bucket_hosted_zone_id" { - value = "${module.origin.s3_bucket_hosted_zone_id}" + value = module.origin.s3_bucket_hosted_zone_id } output "slack_archive_cloudfront_id" { - value = "${module.cdn.cf_id}" + value = module.cdn.cf_id } output "slack_archive_cloudfront_arn" { - value = "${module.cdn.cf_arn}" + value = module.cdn.cf_arn } output "slack_archive_cloudfront_aliases" { - value = "${module.cdn.cf_aliases}" + value = module.cdn.cf_aliases } output "slack_archive_cloudfront_status" { - value = "${module.cdn.cf_status}" + value = module.cdn.cf_status } output "slack_archive_cloudfront_domain_name" { - value = "${module.cdn.cf_domain_name}" + value = module.cdn.cf_domain_name } output "slack_archive_cloudfront_etag" { - value = "${module.cdn.cf_etag}" + value = module.cdn.cf_etag } output "slack_archive_cloudfront_hosted_zone_id" { - value = "${module.cdn.cf_hosted_zone_id}" + value = module.cdn.cf_hosted_zone_id } output "slack_archive_cloudfront_origin_access_identity_path" { - value = "${module.cdn.cf_origin_access_identity}" + value = module.cdn.cf_origin_access_identity } diff --git a/deprecated/aws/sns-topic/main.tf b/deprecated/aws/sns-topic/main.tf index 61de64064..1508e7e12 100644 --- a/deprecated/aws/sns-topic/main.tf +++ b/deprecated/aws/sns-topic/main.tf @@ -28,4 +28,3 @@ module "sns_monitoring" { sns_topic_name = module.sns.sns_topic.name sns_topic_alarms_arn = module.sns.sns_topic.arn } - diff --git a/deprecated/aws/spotinst-kops/variables.tf b/deprecated/aws/spotinst-kops/variables.tf index f6b3581dd..2bca98fbf 100644 --- a/deprecated/aws/spotinst-kops/variables.tf +++ b/deprecated/aws/spotinst-kops/variables.tf @@ -203,4 +203,3 @@ variable "roll_batch_size_percentage" { default = 33 description = "Sets the percentage of the instances to deploy in each batch" } - diff --git a/deprecated/aws/spotinst/main.tf b/deprecated/aws/spotinst/main.tf index 5ae2a1e4a..d1d20cf70 100644 --- a/deprecated/aws/spotinst/main.tf +++ b/deprecated/aws/spotinst/main.tf @@ -79,4 +79,3 @@ module "default" { template_url = local.template_url capabilities = var.capabilities } - diff --git a/deprecated/aws/spotinst/output.tf b/deprecated/aws/spotinst/output.tf index 35a12b4b9..57c02e3b2 100644 --- a/deprecated/aws/spotinst/output.tf +++ b/deprecated/aws/spotinst/output.tf @@ -9,4 +9,3 @@ output "id" { output "outputs" { value = module.default.outputs } - diff --git a/deprecated/aws/spotinst/variables.tf b/deprecated/aws/spotinst/variables.tf index 8970db6f5..e16230caf 100644 --- a/deprecated/aws/spotinst/variables.tf +++ b/deprecated/aws/spotinst/variables.tf @@ -104,4 +104,3 @@ variable "override_token" { default = "" description = "Override Spotinst token" } - diff --git a/deprecated/aws/teleport/main.tf 
b/deprecated/aws/teleport/main.tf index fdf351951..214f5f788 100644 --- a/deprecated/aws/teleport/main.tf +++ b/deprecated/aws/teleport/main.tf @@ -6,52 +6,52 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } module "teleport_backend" { source = "git::https://github.com/cloudposse/terraform-aws-teleport-storage.git?ref=tags/0.4.0" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" + namespace = var.namespace + stage = var.stage + name = var.name attributes = [] - tags = "${var.tags}" - prefix = "${var.s3_prefix}" - standard_transition_days = "${var.s3_standard_transition_days}" - glacier_transition_days = "${var.s3_glacier_transition_days}" - expiration_days = "${var.s3_expiration_days}" + tags = var.tags + prefix = var.s3_prefix + standard_transition_days = var.s3_standard_transition_days + glacier_transition_days = var.s3_glacier_transition_days + expiration_days = var.s3_expiration_days - iam_role_max_session_duration = "${var.iam_role_max_session_duration}" + iam_role_max_session_duration = var.iam_role_max_session_duration # Autoscale min_read and min_write capacity will set the provisioned capacity for both cluster state and audit events - autoscale_min_read_capacity = "${var.autoscale_min_read_capacity}" - autoscale_min_write_capacity = "${var.autoscale_min_write_capacity}" + autoscale_min_read_capacity = var.autoscale_min_read_capacity + autoscale_min_write_capacity = var.autoscale_min_write_capacity # Currently the autoscalers for the cluster state and the audit events share the same settings - autoscale_read_target = "${var.autoscale_read_target}" - autoscale_write_target = "${var.autoscale_write_target}" - autoscale_max_read_capacity = "${var.autoscale_max_read_capacity}" - autoscale_max_write_capacity = "${var.autoscale_max_write_capacity}" + autoscale_read_target = var.autoscale_read_target + autoscale_write_target = var.autoscale_write_target + autoscale_max_read_capacity = var.autoscale_max_read_capacity + autoscale_max_write_capacity = var.autoscale_max_write_capacity } module "teleport_role_name" { source = "git::https://github.com/cloudposse/terraform-null-label.git?ref=tags/0.3.3" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" - delimiter = "${var.delimiter}" + namespace = var.namespace + stage = var.stage + name = var.name + delimiter = var.delimiter attributes = ["auth"] - tags = "${var.tags}" + tags = var.tags } module "kops_metadata" { source = "git::https://github.com/cloudposse/terraform-aws-kops-data-iam.git?ref=tags/0.1.0" - cluster_name = "${var.cluster_name}" + cluster_name = var.cluster_name } locals { - chamber_service = "${var.chamber_service == "" ? basename(pathexpand(path.module)) : var.chamber_service}" + chamber_service = var.chamber_service == "" ? 
basename(pathexpand(path.module)) : var.chamber_service kops_arns = { masters = ["${module.kops_metadata.masters_role_arn}"] @@ -82,16 +82,16 @@ data "aws_iam_policy_document" "assume_role" { } resource "aws_iam_role" "teleport" { - name = "${module.teleport_role_name.id}" - assume_role_policy = "${data.aws_iam_policy_document.assume_role.json}" - max_session_duration = "${var.iam_role_max_session_duration}" + name = module.teleport_role_name.id + assume_role_policy = data.aws_iam_policy_document.assume_role.json + max_session_duration = var.iam_role_max_session_duration description = "The Teleport role to access teleport backend" } data "aws_iam_policy_document" "teleport" { // Teleport can use LetsEncrypt to get TLS certificates. For this // it needs additional permissions which are not included here. - // Teleport can use SSM to publish "join tokens" and retreive the enterprise // license, but that is not fully documented, so permissions to access SSM // are not included at this time. + // Teleport can use SSM to publish "join tokens" and retrieve the enterprise // license, but that is not fully documented, so permissions to access SSM // are not included at this time. // S3 permissions are needed to save and replay SSH sessions statement { @@ -131,18 +131,18 @@ data "aws_iam_policy_document" "teleport" { } resource "aws_iam_policy" "teleport" { - name = "${module.teleport_role_name.id}" + name = module.teleport_role_name.id description = "Grant permissions for teleport" - policy = "${data.aws_iam_policy_document.teleport.json}" + policy = data.aws_iam_policy_document.teleport.json } resource "aws_iam_role_policy_attachment" "teleport" { - role = "${aws_iam_role.teleport.name}" - policy_arn = "${aws_iam_policy.teleport.arn}" + role = aws_iam_role.teleport.name + policy_arn = aws_iam_policy.teleport.arn } resource "aws_ssm_parameter" "teleport_audit_sessions_uri" { - name = "${format(var.chamber_parameter_name, local.chamber_service, "teleport_audit_sessions_uri")}" + name = format(var.chamber_parameter_name, local.chamber_service, "teleport_audit_sessions_uri") value = "s3://${module.teleport_backend.s3_bucket_id}" description = "Teleport session logs storage URI" type = "String" @@ -150,7 +150,7 @@ resource "aws_ssm_parameter" "teleport_audit_sessions_uri" { } resource "aws_ssm_parameter" "teleport_audit_events_uri" { - name = "${format(var.chamber_parameter_name, local.chamber_service, "teleport_audit_events_uri")}" + name = format(var.chamber_parameter_name, local.chamber_service, "teleport_audit_events_uri") value = "dynamodb://${module.teleport_backend.dynamodb_audit_table_id}" description = "Teleport audite events storage URI" type = "String" @@ -158,24 +158,24 @@ resource "aws_ssm_parameter" "teleport_audit_events_uri" { } resource "aws_ssm_parameter" "teleport_cluster_state_dynamodb_table" { - name = "${format(var.chamber_parameter_name, local.chamber_service, "teleport_cluster_state_dynamodb_table")}" - value = "${module.teleport_backend.dynamodb_state_table_id}" + name = format(var.chamber_parameter_name, local.chamber_service, "teleport_cluster_state_dynamodb_table") + value = module.teleport_backend.dynamodb_state_table_id description = "Teleport cluster state storage dynamodb table" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "teleport_auth_iam_role" { - name = "${format(var.chamber_parameter_name, local.chamber_service, "teleport_auth_iam_role")}" - value = "${aws_iam_role.teleport.name}" + name = format(var.chamber_parameter_name, 
local.chamber_service, "teleport_auth_iam_role") + value = aws_iam_role.teleport.name description = "Teleport auth IAM role" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "teleport_kubernetes_namespace" { - name = "${format(var.chamber_parameter_name, local.chamber_service, "teleport_kubernetes_namespace")}" - value = "${var.kubernetes_namespace}" + name = format(var.chamber_parameter_name, local.chamber_service, "teleport_kubernetes_namespace") + value = var.kubernetes_namespace description = "Teleport auth IAM role" type = "String" overwrite = "true" @@ -186,35 +186,35 @@ locals { } resource "random_string" "tokens" { - count = "${length(local.token_names)}" + count = length(local.token_names) length = 32 special = false keepers { - cluster_name = "${var.cluster_name}" + cluster_name = var.cluster_name } } resource "aws_ssm_parameter" "teleport_tokens" { - count = "${length(local.token_names)}" - name = "${format(var.chamber_parameter_name, local.chamber_service, "${element(local.token_names, count.index)}")}" - value = "${element(random_string.tokens.*.result, count.index)}" + count = length(local.token_names) + name = format(var.chamber_parameter_name, local.chamber_service, "${element(local.token_names, count.index)}") + value = element(random_string.tokens.*.result, count.index) description = "Teleport join token: ${element(local.token_names, count.index)}" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "teleport_proxy_domain_name" { - name = "${format(var.chamber_parameter_name, local.chamber_service, "teleport_proxy_domain_name")}" - value = "${var.teleport_proxy_domain_name}" + name = format(var.chamber_parameter_name, local.chamber_service, "teleport_proxy_domain_name") + value = var.teleport_proxy_domain_name description = "Teleport Proxy domain name" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "teleport_version" { - name = "${format(var.chamber_parameter_name, local.chamber_service, "teleport_version")}" - value = "${var.teleport_version}" + name = format(var.chamber_parameter_name, local.chamber_service, "teleport_version") + value = var.teleport_version description = "Teleport version to install" type = "String" overwrite = "true" @@ -230,6 +230,6 @@ resource "kubernetes_namespace" "default" { name = "${var.kubernetes_namespace}" } - name = "${var.kubernetes_namespace}" + name = var.kubernetes_namespace } } diff --git a/deprecated/aws/teleport/outputs.tf b/deprecated/aws/teleport/outputs.tf index 2ed293f5f..ad27c55b4 100644 --- a/deprecated/aws/teleport/outputs.tf +++ b/deprecated/aws/teleport/outputs.tf @@ -1,25 +1,25 @@ output "teleport_version" { - value = "${var.teleport_version}" + value = var.teleport_version } output "teleport_proxy_domain_name" { - value = "${var.teleport_proxy_domain_name}" + value = var.teleport_proxy_domain_name } output "parameter_store_prefix" { - value = "${format(var.chamber_parameter_name, local.chamber_service, "")}" + value = format(var.chamber_parameter_name, local.chamber_service, "") } output "teleport_kubernetes_namespace" { - value = "${var.kubernetes_namespace}" + value = var.kubernetes_namespace } output "teleport_auth_iam_role" { - value = "${aws_iam_role.teleport.name}" + value = aws_iam_role.teleport.name } output "teleport_cluster_state_dynamodb_table" { - value = "${module.teleport_backend.dynamodb_state_table_id}" + value = module.teleport_backend.dynamodb_state_table_id } output "teleport_audit_sessions_uri" { diff --git a/deprecated/aws/teleport/variables.tf 
b/deprecated/aws/teleport/variables.tf index 26a38e51f..e52ba7b5a 100644 --- a/deprecated/aws/teleport/variables.tf +++ b/deprecated/aws/teleport/variables.tf @@ -1,5 +1,5 @@ variable "permitted_nodes" { - type = "string" + type = string # Set to 'masters' if using kiam to control roles default = "both" @@ -7,67 +7,67 @@ } variable "cluster_name" { - type = "string" + type = string description = "Kops cluster name (e.g. `us-east-1.prod.cloudposse.co` or `cluster-1.cloudposse.co`)" } variable "aws_assume_role_arn" { - type = "string" + type = string description = "AWS IAM Role for Terraform to assume during operation" } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "kubernetes_namespace" { - type = "string" + type = string description = "Kubernetes namespace in which to place Teleport resources" default = "teleport" } variable "stage" { - type = "string" + type = string description = "Stage, e.g. 'prod', 'staging', 'dev', or 'test'" } variable "name" { - type = "string" + type = string description = "The name of the app" default = "teleport" } variable "delimiter" { - type = "string" + type = string default = "-" description = "Delimiter to be used between `namespace`, `stage`, `name` and `attributes`" } variable "tags" { - type = "map" + type = map(string) default = {} description = "Additional tags (e.g. map('BusinessUnit`,`XYZ`)" } variable "teleport_version" { - type = "string" + type = string description = "Version number of Teleport to install (e.g. \"4.0.9\")" } variable "teleport_proxy_domain_name" { - type = "string" + type = string description = "Domain name to use for Teleport Proxy" } variable "masters_name" { - type = "string" + type = string default = "masters" description = "Kops masters subdomain name in the cluster DNS zone" } variable "nodes_name" { - type = "string" + type = string default = "nodes" description = "Kops nodes subdomain name in the cluster DNS zone" } @@ -82,25 +82,25 @@ variable "chamber_parameter_name" { } variable "s3_prefix" { - type = "string" + type = string description = "S3 bucket prefix" default = "" } variable "s3_standard_transition_days" { - type = "string" + type = string description = "Number of days to persist in the standard storage tier before moving to the glacier tier" default = "30" } variable "s3_glacier_transition_days" { - type = "string" + type = string description = "Number of days after which to move the data to the glacier storage tier" default = "60" } variable "s3_expiration_days" { - type = "string" + type = string description = "Number of days after which to expunge the objects" default = "90" } diff --git a/deprecated/aws/tfstate-backend/README.md b/deprecated/aws/tfstate-backend/README.md index 5a7634dbf..196869467 100644 --- a/deprecated/aws/tfstate-backend/README.md +++ b/deprecated/aws/tfstate-backend/README.md @@ -1,6 +1,6 @@ # Bootstrap Process -Perform these steps in each account, the very first time, in order to setup the tfstate bucket. +Perform these steps in each account, the very first time, in order to set up the tfstate bucket. ## Create @@ -19,7 +19,7 @@ ENV TF_DYNAMODB_TABLE="cpco-staging-terraform-state-lock" ## Destroy -To destroy the state bucket, first make sure all services in the account have already been destroyed. +To destroy the state bucket, first make sure all services in the account have already been destroyed. 
Then run: ``` diff --git a/deprecated/aws/tfstate-backend/main.tf b/deprecated/aws/tfstate-backend/main.tf index cbbeb258e..96822414f 100644 --- a/deprecated/aws/tfstate-backend/main.tf +++ b/deprecated/aws/tfstate-backend/main.tf @@ -8,63 +8,63 @@ variable "aws_assume_role_arn" {} provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } variable "name" { - type = "string" + type = string description = "Application or solution name (e.g. `app`)" default = "terraform" } variable "delimiter" { - type = "string" + type = string default = "-" description = "Delimiter to be used between `namespace`, `stage`, `name` and `attributes`" } variable "attributes" { - type = "list" + type = list(string) default = ["state"] description = "Additional attributes (e.g. `1`)" } variable "tags" { - type = "map" + type = map(string) default = {} description = "Additional tags (e.g. map(`BusinessUnit`,`XYZ`)" } variable "region" { - type = "string" + type = string description = "AWS Region the S3 bucket should reside in" default = "us-west-2" } variable "force_destroy" { - type = "string" + type = string description = "A boolean that indicates the S3 bucket can be destroyed even if it contains objects. These objects are not recoverable." default = "false" } module "tfstate_backend" { source = "git::https://github.com/cloudposse/terraform-aws-tfstate-backend.git?ref=tags/0.7.0" - namespace = "${var.namespace}" - name = "${var.name}" - stage = "${var.stage}" - attributes = "${var.attributes}" - tags = "${var.tags}" - region = "${var.region}" - force_destroy = "${var.force_destroy}" + namespace = var.namespace + name = var.name + stage = var.stage + attributes = var.attributes + tags = var.tags + region = var.region + force_destroy = var.force_destroy } diff --git a/deprecated/aws/tfstate-backend/outputs.tf b/deprecated/aws/tfstate-backend/outputs.tf index 3ae318d44..2f67095d8 100644 --- a/deprecated/aws/tfstate-backend/outputs.tf +++ b/deprecated/aws/tfstate-backend/outputs.tf @@ -1,23 +1,23 @@ output "tfstate_backend_s3_bucket_domain_name" { - value = "${module.tfstate_backend.s3_bucket_domain_name}" + value = module.tfstate_backend.s3_bucket_domain_name } output "tfstate_backend_s3_bucket_id" { - value = "${module.tfstate_backend.s3_bucket_id}" + value = module.tfstate_backend.s3_bucket_id } output "tfstate_backend_s3_bucket_arn" { - value = "${module.tfstate_backend.s3_bucket_arn}" + value = module.tfstate_backend.s3_bucket_arn } output "tfstate_backend_dynamodb_table_name" { - value = "${module.tfstate_backend.dynamodb_table_name}" + value = module.tfstate_backend.dynamodb_table_name } output "tfstate_backend_dynamodb_table_id" { - value = "${module.tfstate_backend.dynamodb_table_id}" + value = module.tfstate_backend.dynamodb_table_id } output "tfstate_backend_dynamodb_table_arn" { - value = "${module.tfstate_backend.dynamodb_table_arn}" + value = module.tfstate_backend.dynamodb_table_arn } diff --git a/deprecated/aws/tfstate-backend/scripts/force-destroy.sh b/deprecated/aws/tfstate-backend/scripts/force-destroy.sh index ac172be9a..af4c5b062 100755 --- a/deprecated/aws/tfstate-backend/scripts/force-destroy.sh +++ b/deprecated/aws/tfstate-backend/scripts/force-destroy.sh @@ -1,5 +1,5 @@ #!/bin/bash - + # Remove all versions and delete 
markers for each object OBJECT_VERSIONS=$(aws --output text s3api list-object-versions --bucket "$1" | grep -E '^VERSIONS|^DELETEMARKERS') @@ -20,6 +20,6 @@ while read -r OBJECT_VERSION; do aws s3api delete-object --bucket $1 --key $KEY --version-id $VERSION_ID >/dev/null fi done <<< "$OBJECT_VERSIONS" - + # Remove the bucket with --force option to remove any remaining files without versions. aws s3 rb --force s3://$1 diff --git a/deprecated/aws/users/main.tf b/deprecated/aws/users/main.tf index 79dd367c8..b68506b65 100644 --- a/deprecated/aws/users/main.tf +++ b/deprecated/aws/users/main.tf @@ -6,7 +6,7 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } @@ -29,29 +29,29 @@ data "terraform_remote_state" "root_iam" { } locals { - accounts_enabled = "${concat(list("root"), var.accounts_enabled)}" + accounts_enabled = concat(list("root"), var.accounts_enabled) } # Fetch the OrganizationAccountAccessRole ARNs from SSM module "admin_groups" { source = "git::https://github.com/cloudposse/terraform-aws-ssm-parameter-store?ref=tags/0.1.5" - parameter_read = "${formatlist("/${var.namespace}/%s/admin_group", local.accounts_enabled)}" + parameter_read = formatlist("/${var.namespace}/%s/admin_group", local.accounts_enabled) } locals { - account_alias = "${data.terraform_remote_state.account_settings.account_alias}" - signin_url = "${data.terraform_remote_state.account_settings.signin_url}" + account_alias = data.terraform_remote_state.account_settings.account_alias + signin_url = data.terraform_remote_state.account_settings.signin_url admin_groups = ["${module.admin_groups.values}"] readonly_groups = ["${data.terraform_remote_state.root_iam.readonly_group}"] - minimum_password_length = "${data.terraform_remote_state.account_settings.minimum_password_length}" + minimum_password_length = data.terraform_remote_state.account_settings.minimum_password_length } output "account_alias" { description = "AWS IAM Account Alias" - value = "${local.account_alias}" + value = local.account_alias } output "signin_url" { description = "AWS Signin URL" - value = "${local.signin_url}" + value = local.signin_url } diff --git a/deprecated/aws/users/variables.tf b/deprecated/aws/users/variables.tf index d67b5a583..b50636363 100644 --- a/deprecated/aws/users/variables.tf +++ b/deprecated/aws/users/variables.tf @@ -1,30 +1,30 @@ variable "aws_assume_role_arn" {} variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } variable "name" { - type = "string" + type = string description = "Application or solution name (e.g. 
`app`)" default = "terraform" } variable "smtp_username" { description = "Username to authenticate with the SMTP server" - type = "string" + type = string default = "" } variable "smtp_password" { description = "Password to authenticate with the SMTP server" - type = "string" + type = string default = "" } @@ -39,7 +39,7 @@ variable "smtp_port" { } variable "accounts_enabled" { - type = "list" + type = list(string) description = "Accounts to enable" default = ["dev", "staging", "prod", "testing", "audit"] } diff --git a/deprecated/aws/vpc-peering-intra-account/main.tf b/deprecated/aws/vpc-peering-intra-account/main.tf index 8fa6b25ef..858d4e995 100644 --- a/deprecated/aws/vpc-peering-intra-account/main.tf +++ b/deprecated/aws/vpc-peering-intra-account/main.tf @@ -6,28 +6,28 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } module "vpc_peering" { source = "git::https://github.com/cloudposse/terraform-aws-vpc-peering.git?ref=tags/0.2.0" - enabled = "${var.enabled}" + enabled = var.enabled - stage = "${var.stage}" - namespace = "${var.namespace}" - name = "${var.name}" - delimiter = "${var.delimiter}" - attributes = "${var.attributes}" - tags = "${var.tags}" + stage = var.stage + namespace = var.namespace + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags - requestor_vpc_id = "${var.requestor_vpc_id}" - requestor_vpc_tags = "${var.requestor_vpc_tags}" - acceptor_vpc_id = "${var.acceptor_vpc_id}" - acceptor_vpc_tags = "${var.acceptor_vpc_tags}" - auto_accept = "${var.auto_accept}" + requestor_vpc_id = var.requestor_vpc_id + requestor_vpc_tags = var.requestor_vpc_tags + acceptor_vpc_id = var.acceptor_vpc_id + acceptor_vpc_tags = var.acceptor_vpc_tags + auto_accept = var.auto_accept - acceptor_allow_remote_vpc_dns_resolution = "${var.acceptor_allow_remote_vpc_dns_resolution}" - requestor_allow_remote_vpc_dns_resolution = "${var.requestor_allow_remote_vpc_dns_resolution}" + acceptor_allow_remote_vpc_dns_resolution = var.acceptor_allow_remote_vpc_dns_resolution + requestor_allow_remote_vpc_dns_resolution = var.requestor_allow_remote_vpc_dns_resolution } diff --git a/deprecated/aws/vpc-peering-intra-account/outputs.tf b/deprecated/aws/vpc-peering-intra-account/outputs.tf index 287462bd2..f7993616e 100644 --- a/deprecated/aws/vpc-peering-intra-account/outputs.tf +++ b/deprecated/aws/vpc-peering-intra-account/outputs.tf @@ -1,9 +1,9 @@ output "connection_id" { - value = "${module.vpc_peering.connection_id}" + value = module.vpc_peering.connection_id description = "VPC peering connection ID" } output "accept_status" { - value = "${module.vpc_peering.accept_status}" + value = module.vpc_peering.accept_status description = "The status of the VPC peering connection request" } diff --git a/deprecated/aws/vpc-peering-intra-account/variables.tf b/deprecated/aws/vpc-peering-intra-account/variables.tf index 2cc379d04..c80e31fe4 100644 --- a/deprecated/aws/vpc-peering-intra-account/variables.tf +++ b/deprecated/aws/vpc-peering-intra-account/variables.tf @@ -4,29 +4,29 @@ variable "enabled" { } variable "aws_assume_role_arn" { - type = "string" + type = string } variable "requestor_vpc_id" { - type = "string" - description = "Requestor VPC ID" + type = string + description = "Requester VPC ID" default = "" } variable "requestor_vpc_tags" { - type = "map" - description = "Requestor VPC tags" + type = map(string) + description = "Requester VPC tags" default = {} } variable "acceptor_vpc_id" { 
- type = "string" + type = string description = "Acceptor VPC ID" default = "" } variable "acceptor_vpc_tags" { - type = "map" + type = map(string) description = "Acceptor VPC tags" default = {} } @@ -38,43 +38,43 @@ variable "auto_accept" { variable "acceptor_allow_remote_vpc_dns_resolution" { default = "true" - description = "Allow acceptor VPC to resolve public DNS hostnames to private IP addresses when queried from instances in the requestor VPC" + description = "Allow acceptor VPC to resolve public DNS hostnames to private IP addresses when queried from instances in the requester VPC" } variable "requestor_allow_remote_vpc_dns_resolution" { default = "true" - description = "Allow requestor VPC to resolve public DNS hostnames to private IP addresses when queried from instances in the acceptor VPC" + description = "Allow requester VPC to resolve public DNS hostnames to private IP addresses when queried from instances in the acceptor VPC" } variable "namespace" { description = "Namespace (e.g. `cp` or `cloudposse`)" - type = "string" + type = string } variable "stage" { description = "Stage (e.g. `prod`, `dev`, `staging`)" - type = "string" + type = string } variable "name" { description = "Name (e.g. `app` or `cluster`)" - type = "string" + type = string } variable "delimiter" { - type = "string" + type = string default = "-" description = "Delimiter to be used between `namespace`, `stage`, `name`, and `attributes`" } variable "attributes" { - type = "list" + type = list(string) default = [] description = "Additional attributes (e.g. `policy` or `role`)" } variable "tags" { - type = "map" + type = map(string) default = {} description = "Additional tags (e.g. map('BusinessUnit`,`XYZ`)" } diff --git a/deprecated/aws/vpc-peering/main.tf b/deprecated/aws/vpc-peering/main.tf index 3a961f1ac..dc28aedd8 100644 --- a/deprecated/aws/vpc-peering/main.tf +++ b/deprecated/aws/vpc-peering/main.tf @@ -6,75 +6,75 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } # Fetch the OrganizationAccountAccessRole ARNs from SSM module "requester_role_arns" { - enabled = "${var.enabled}" + enabled = var.enabled source = "git::https://github.com/cloudposse/terraform-aws-ssm-parameter-store?ref=tags/0.1.5" parameter_read = ["/${var.namespace}/${var.requester_account}/organization_account_access_role"] } locals { - requester_vpc_tags = "${var.requester_vpc_tags}" - requester_region = "${var.requester_region}" - requester_role_arn = "${join("", module.requester_role_arns.values)}" + requester_vpc_tags = var.requester_vpc_tags + requester_region = var.requester_region + requester_role_arn = join("", module.requester_role_arns.values) } # Fetch the OrganizationAccountAccessRole ARNs from SSM module "accepter_role_arns" { - enabled = "${var.enabled}" + enabled = var.enabled source = "git::https://github.com/cloudposse/terraform-aws-ssm-parameter-store?ref=tags/0.1.5" parameter_read = ["/${var.namespace}/${var.accepter_account}/organization_account_access_role"] } locals { - accepter_vpc_tags = "${var.accepter_vpc_tags}" - accepter_region = "${var.accepter_region}" - accepter_role_arn = "${join("", module.accepter_role_arns.values)}" + accepter_vpc_tags = var.accepter_vpc_tags + accepter_region = var.accepter_region + accepter_role_arn = join("", module.accepter_role_arns.values) } module "vpc_peering" { source = "git::https://github.com/cloudposse/terraform-aws-vpc-peering-multi-account.git?ref=tags/0.1.0" - enabled = "${var.enabled}" + 
enabled = var.enabled - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" + namespace = var.namespace + stage = var.stage + name = var.name attributes = ["${var.requester_account}", "${var.accepter_account}"] auto_accept = true # Requester - requester_vpc_tags = "${local.requester_vpc_tags}" - requester_region = "${local.requester_region}" - requester_aws_assume_role_arn = "${local.requester_role_arn}" + requester_vpc_tags = local.requester_vpc_tags + requester_region = local.requester_region + requester_aws_assume_role_arn = local.requester_role_arn # Accepter - accepter_vpc_tags = "${local.accepter_vpc_tags}" - accepter_region = "${local.accepter_region}" - accepter_aws_assume_role_arn = "${local.accepter_role_arn}" + accepter_vpc_tags = local.accepter_vpc_tags + accepter_region = local.accepter_region + accepter_aws_assume_role_arn = local.accepter_role_arn } output "accepter_accept_status" { description = "Accepter VPC peering connection request status" - value = "${module.vpc_peering.accepter_accept_status}" + value = module.vpc_peering.accepter_accept_status } output "accepter_connection_id" { description = "Accepter VPC peering connection ID" - value = "${module.vpc_peering.accepter_connection_id}" + value = module.vpc_peering.accepter_connection_id } output "requester_accept_status" { description = "Requester VPC peering connection request status" - value = "${module.vpc_peering.requester_accept_status}" + value = module.vpc_peering.requester_accept_status } output "requester_connection_id" { description = "Requester VPC peering connection ID" - value = "${module.vpc_peering.requester_connection_id}" + value = module.vpc_peering.requester_connection_id } diff --git a/deprecated/aws/vpc-peering/variables.tf b/deprecated/aws/vpc-peering/variables.tf index 85e4cce08..cdb936c32 100644 --- a/deprecated/aws/vpc-peering/variables.tf +++ b/deprecated/aws/vpc-peering/variables.tf @@ -1,23 +1,23 @@ variable "aws_assume_role_arn" {} variable "enabled" { - type = "string" + type = string description = "Whether to create the resources. Set to `false` to prevent the module from creating any resources" default = "true" } variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `eg` or `cp`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } variable "name" { - type = "string" + type = string description = "Application or solution name (e.g. `app`)" default = "vpc-peering" } @@ -31,7 +31,7 @@ variable "requester_region" { } variable "requester_vpc_tags" { - type = "map" + type = map(string) description = "Tags to filter for the requester's VPC" default = {} } @@ -45,7 +45,7 @@ variable "accepter_account" { } variable "accepter_vpc_tags" { - type = "map" + type = map(string) description = "Tags to filter for the accepter's VPC" default = {} } diff --git a/deprecated/aws/vpc/main.tf b/deprecated/aws/vpc/main.tf index 3cc87318c..0dd89393b 100644 --- a/deprecated/aws/vpc/main.tf +++ b/deprecated/aws/vpc/main.tf @@ -6,7 +6,7 @@ terraform { provider "aws" { assume_role { - role_arn = "${var.aws_assume_role_arn}" + role_arn = var.aws_assume_role_arn } } @@ -15,18 +15,18 @@ provider "null" { } locals { - chamber_service = "${var.chamber_service == "" ? basename(pathexpand(path.module)) : var.chamber_service}" + chamber_service = var.chamber_service == "" ? 
basename(pathexpand(path.module)) : var.chamber_service # Work around limitation that conditional operator cannot be used with lists. https://github.com/hashicorp/terraform/issues/18259 - availability_zones = "${split("|", length(var.availability_zones) == 0 ? join("|", data.aws_availability_zones.available.names) : join("|", var.availability_zones))}" + availability_zones = split("|", length(var.availability_zones) == 0 ? join("|", data.aws_availability_zones.available.names) : join("|", var.availability_zones)) } module "parameter_prefix" { source = "git::https://github.com/cloudposse/terraform-terraform-label.git?ref=tags/0.2.1" namespace = "" stage = "" - name = "${var.name}" - attributes = "${var.attributes}" + name = var.name + attributes = var.attributes delimiter = "_" } @@ -34,108 +34,108 @@ data "aws_availability_zones" "available" {} module "vpc" { source = "git::https://github.com/cloudposse/terraform-aws-vpc.git?ref=tags/0.3.3" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" - cidr_block = "${var.vpc_cidr_block}" - attributes = "${var.attributes}" - tags = "${var.tags}" + namespace = var.namespace + stage = var.stage + name = var.name + cidr_block = var.vpc_cidr_block + attributes = var.attributes + tags = var.tags } module "subnets" { source = "git::https://github.com/cloudposse/terraform-aws-dynamic-subnets.git?ref=tags/0.12.3" - availability_zones = "${local.availability_zones}" - max_subnet_count = "${var.max_subnet_count}" - namespace = "${var.namespace}" - stage = "${var.stage}" - name = "${var.name}" - vpc_id = "${module.vpc.vpc_id}" - igw_id = "${module.vpc.igw_id}" - cidr_block = "${module.vpc.vpc_cidr_block}" - nat_gateway_enabled = "${var.vpc_nat_gateway_enabled}" - nat_instance_enabled = "${var.vpc_nat_instance_enabled}" - nat_instance_type = "${var.vpc_nat_instance_type}" - attributes = "${var.attributes}" - tags = "${var.tags}" + availability_zones = local.availability_zones + max_subnet_count = var.max_subnet_count + namespace = var.namespace + stage = var.stage + name = var.name + vpc_id = module.vpc.vpc_id + igw_id = module.vpc.igw_id + cidr_block = module.vpc.vpc_cidr_block + nat_gateway_enabled = var.vpc_nat_gateway_enabled + nat_instance_enabled = var.vpc_nat_instance_enabled + nat_instance_type = var.vpc_nat_instance_type + attributes = var.attributes + tags = var.tags } resource "aws_ssm_parameter" "vpc_id" { description = "VPC ID of backing services" - name = "${format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "vpc_id")}" - value = "${module.vpc.vpc_id}" + name = format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "vpc_id") + value = module.vpc.vpc_id type = "String" overwrite = "true" } resource "aws_ssm_parameter" "igw_id" { description = "VPC ID of backing services" - name = "${format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "igw_id")}" - value = "${module.vpc.igw_id}" + name = format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "igw_id") + value = module.vpc.igw_id type = "String" overwrite = "true" } resource "aws_ssm_parameter" "cidr_block" { description = "VPC ID of backing services" - name = "${format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "cidr_block")}" - value = "${module.vpc.vpc_cidr_block}" + name = format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "cidr_block") + value = 
module.vpc.vpc_cidr_block type = "String" overwrite = "true" } resource "aws_ssm_parameter" "availability_zones" { - name = "${format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "availability_zones")}" - value = "${join(",", local.availability_zones)}" + name = format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "availability_zones") + value = join(",", local.availability_zones) description = "VPC subnet availability zones" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "nat_gateways" { - count = "${var.vpc_nat_gateway_enabled == "true" ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "nat_gateways")}" - value = "${join(",", module.subnets.nat_gateway_ids)}" + count = var.vpc_nat_gateway_enabled == "true" ? 1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "nat_gateways") + value = join(",", module.subnets.nat_gateway_ids) description = "VPC private NAT gateways" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "nat_instances" { - count = "${var.vpc_nat_instance_enabled == "true" ? 1 : 0}" - name = "${format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "nat_instances")}" - value = "${join(",", module.subnets.nat_instance_ids)}" + count = var.vpc_nat_instance_enabled == "true" ? 1 : 0 + name = format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "nat_instances") + value = join(",", module.subnets.nat_instance_ids) description = "VPC private NAT instances" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "private_subnet_cidrs" { - name = "${format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "private_subnet_cidrs")}" - value = "${join(",", module.subnets.private_subnet_cidrs)}" + name = format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "private_subnet_cidrs") + value = join(",", module.subnets.private_subnet_cidrs) description = "VPC private subnet CIDRs" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "private_subnet_ids" { - name = "${format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "private_subnet_ids")}" - value = "${join(",", module.subnets.private_subnet_ids)}" + name = format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "private_subnet_ids") + value = join(",", module.subnets.private_subnet_ids) description = "VPC private subnet AWS IDs" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "public_subnet_cidrs" { - name = "${format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "public_subnet_cidrs")}" - value = "${join(",", module.subnets.public_subnet_cidrs)}" + name = format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "public_subnet_cidrs") + value = join(",", module.subnets.public_subnet_cidrs) description = "VPC public subnet CIDRs" type = "String" overwrite = "true" } resource "aws_ssm_parameter" "public_subnet_ids" { - name = "${format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "public_subnet_ids")}" - value = "${join(",", module.subnets.public_subnet_ids)}" + name = format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "public_subnet_ids") + value = join(",", 
module.subnets.public_subnet_ids) description = "VPC public subnet AWS IDs" type = "String" overwrite = "true" diff --git a/deprecated/aws/vpc/outputs.tf b/deprecated/aws/vpc/outputs.tf index 7c66d939a..23fb4b993 100644 --- a/deprecated/aws/vpc/outputs.tf +++ b/deprecated/aws/vpc/outputs.tf @@ -1,48 +1,48 @@ output "parameter_store_prefix" { - value = "${format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "")}" + value = format(var.chamber_parameter_name, local.chamber_service, module.parameter_prefix.id, "") } output "vpc_id" { description = "AWS ID of the VPC created" - value = "${aws_ssm_parameter.vpc_id.value}" + value = aws_ssm_parameter.vpc_id.value } output "igw_id" { description = "AWS ID of Internet Gateway for the VPC" - value = "${aws_ssm_parameter.igw_id.value}" + value = aws_ssm_parameter.igw_id.value } output "nat_gateways" { description = "Comma-separated string list of AWS IDs of NAT Gateways for the VPC" - value = "${join("", aws_ssm_parameter.nat_gateways.*.value)}" + value = join("", aws_ssm_parameter.nat_gateways.*.value) } output "cidr_block" { description = "CIDR block of the VPC" - value = "${aws_ssm_parameter.cidr_block.value}" + value = aws_ssm_parameter.cidr_block.value } output "availability_zones" { - description = "Comma-separated string list of avaialbility zones where subnets have been created" - value = "${aws_ssm_parameter.availability_zones.value}" + description = "Comma-separated string list of availability zones where subnets have been created" + value = aws_ssm_parameter.availability_zones.value } output "public_subnet_cidrs" { description = "Comma-separated string list of CIDR blocks of public VPC subnets" - value = "${aws_ssm_parameter.public_subnet_cidrs.value}" + value = aws_ssm_parameter.public_subnet_cidrs.value } output "public_subnet_ids" { description = "Comma-separated string list of AWS IDs of public VPC subnets" - value = "${aws_ssm_parameter.public_subnet_ids.value}" + value = aws_ssm_parameter.public_subnet_ids.value } output "private_subnet_cidrs" { description = "Comma-separated string list of CIDR blocks of private VPC subnets" - value = "${aws_ssm_parameter.private_subnet_cidrs.value}" + value = aws_ssm_parameter.private_subnet_cidrs.value } output "private_subnet_ids" { description = "Comma-separated string list of AWS IDs of private VPC subnets" - value = "${aws_ssm_parameter.private_subnet_ids.value}" + value = aws_ssm_parameter.private_subnet_ids.value } diff --git a/deprecated/aws/vpc/variables.tf b/deprecated/aws/vpc/variables.tf index 76e5b19c1..7d3d7b213 100644 --- a/deprecated/aws/vpc/variables.tf +++ b/deprecated/aws/vpc/variables.tf @@ -1,40 +1,40 @@ variable "namespace" { - type = "string" + type = string description = "Namespace (e.g. `cp` or `cloudposse`)" } variable "stage" { - type = "string" + type = string description = "Stage (e.g. `prod`, `dev`, `staging`)" } variable "name" { - type = "string" + type = string description = "Name to distinguish this VPC from others in this account" default = "vpc" } variable "attributes" { - type = "list" + type = list(string) description = "Additional attributes to distinguish this VPC from others in this account" default = ["common"] } variable "aws_assume_role_arn" { - type = "string" + type = string } variable "region" { - type = "string" + type = string } variable "max_subnet_count" { default = 0 - description = "Sets the maximum amount of subnets to deploy. 
0 will deploy a subnet for every provided availablility zone (in `availability_zones` variable) within the region" + description = "Sets the maximum amount of subnets to deploy. 0 will deploy a subnet for every provided availability zone (in `availability_zones` variable) within the region" } variable "availability_zones" { - type = "list" + type = list(string) default = [] description = "List of Availability Zones where subnets will be created. If empty, all zones will be used" } @@ -55,7 +55,7 @@ variable "vpc_nat_instance_type" { } variable "tags" { - type = "map" + type = map(string) default = {} description = "Additional tags, for example map(`KubernetesCluster`,`us-west-2.prod.example.com`)" } diff --git a/deprecated/eks-iam/.gitignore b/deprecated/eks-iam/.gitignore new file mode 100644 index 000000000..a9097e173 --- /dev/null +++ b/deprecated/eks-iam/.gitignore @@ -0,0 +1 @@ +!default.auto.tfvars diff --git a/modules/eks-iam/README.md b/deprecated/eks-iam/README.md similarity index 98% rename from modules/eks-iam/README.md rename to deprecated/eks-iam/README.md index 41e912331..bd080c708 100644 --- a/modules/eks-iam/README.md +++ b/deprecated/eks-iam/README.md @@ -32,7 +32,7 @@ components: | [terraform](#requirement\_terraform) | >= 0.13.0 | | [aws](#requirement\_aws) | >= 3.0 | | [local](#requirement\_local) | >= 1.3 | -| [template](#requirement\_template) | >= 2.0 | +| [template](#requirement\_template) | >= 2.2 | ## Providers @@ -79,7 +79,6 @@ components: | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | Environment, e.g. 'uw2', 'us-west-2', OR 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters.
Set to `0` for unlimited length.
Set to `null` for default, which is `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kms\_alias\_name](#input\_kms\_alias\_name) | AWS KMS alias used for encryption/decryption of SSM parameters default is alias used in SSM | `string` | `"alias/aws/ssm"` | no | | [label\_order](#input\_label\_order) | The naming order of the id output and Name tag.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 5 elements, but at least one must be present. | `list(string)` | `null` | no | | [name](#input\_name) | Solution name, e.g. 'app' or 'jenkins' | `string` | `null` | no | diff --git a/modules/eks-iam/alb-controller-iam-policy.json b/deprecated/eks-iam/alb-controller-iam-policy.json similarity index 99% rename from modules/eks-iam/alb-controller-iam-policy.json rename to deprecated/eks-iam/alb-controller-iam-policy.json index d981ab244..ad188df25 100644 --- a/modules/eks-iam/alb-controller-iam-policy.json +++ b/deprecated/eks-iam/alb-controller-iam-policy.json @@ -188,4 +188,4 @@ "Resource": "*" } ] -} \ No newline at end of file +} diff --git a/modules/eks-iam/alb-controller.tf b/deprecated/eks-iam/alb-controller.tf old mode 100755 new mode 100644 similarity index 100% rename from modules/eks-iam/alb-controller.tf rename to deprecated/eks-iam/alb-controller.tf diff --git a/modules/eks-iam/autoscaler.tf b/deprecated/eks-iam/autoscaler.tf old mode 100755 new mode 100644 similarity index 100% rename from modules/eks-iam/autoscaler.tf rename to deprecated/eks-iam/autoscaler.tf diff --git a/modules/eks-iam/cert-manager.tf b/deprecated/eks-iam/cert-manager.tf old mode 100755 new mode 100644 similarity index 100% rename from modules/eks-iam/cert-manager.tf rename to deprecated/eks-iam/cert-manager.tf diff --git a/modules/eks-iam/context.tf b/deprecated/eks-iam/context.tf similarity index 100% rename from modules/eks-iam/context.tf rename to deprecated/eks-iam/context.tf diff --git a/modules/eks-iam/default.auto.tfvars b/deprecated/eks-iam/default.auto.tfvars old mode 100755 new mode 100644 similarity index 100% rename from modules/eks-iam/default.auto.tfvars rename to deprecated/eks-iam/default.auto.tfvars diff --git a/modules/eks-iam/external-dns.tf b/deprecated/eks-iam/external-dns.tf old mode 100755 new mode 100644 similarity index 100% rename from modules/eks-iam/external-dns.tf rename to deprecated/eks-iam/external-dns.tf diff --git a/modules/eks-iam/main.tf b/deprecated/eks-iam/main.tf old mode 100755 new mode 100644 similarity index 100% rename from modules/eks-iam/main.tf rename to deprecated/eks-iam/main.tf diff --git a/modules/eks-iam/modules/service-account/context.tf b/deprecated/eks-iam/modules/service-account/context.tf similarity index 100% rename from modules/eks-iam/modules/service-account/context.tf rename to deprecated/eks-iam/modules/service-account/context.tf diff --git a/modules/eks-iam/modules/service-account/default.auto.tfvars b/deprecated/eks-iam/modules/service-account/default.auto.tfvars old mode 100755 new mode 100644 similarity index 100% rename from modules/eks-iam/modules/service-account/default.auto.tfvars rename to deprecated/eks-iam/modules/service-account/default.auto.tfvars diff --git a/modules/eks-iam/modules/service-account/main.tf b/deprecated/eks-iam/modules/service-account/main.tf old mode 100755 new mode 100644 similarity index 100% rename from modules/eks-iam/modules/service-account/main.tf rename to deprecated/eks-iam/modules/service-account/main.tf diff --git a/modules/eks-iam/modules/service-account/outputs.tf b/deprecated/eks-iam/modules/service-account/outputs.tf old mode 100755 new mode 100644 similarity index 100% rename from modules/eks-iam/modules/service-account/outputs.tf rename to deprecated/eks-iam/modules/service-account/outputs.tf diff --git a/modules/eks-iam/modules/service-account/variables.tf b/deprecated/eks-iam/modules/service-account/variables.tf old mode 100755 new mode 100644 similarity index 100% rename 
from modules/eks-iam/modules/service-account/variables.tf rename to deprecated/eks-iam/modules/service-account/variables.tf diff --git a/modules/eks-iam/outputs.tf b/deprecated/eks-iam/outputs.tf old mode 100755 new mode 100644 similarity index 100% rename from modules/eks-iam/outputs.tf rename to deprecated/eks-iam/outputs.tf diff --git a/deprecated/eks-iam/providers.tf b/deprecated/eks-iam/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/deprecated/eks-iam/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/eks-iam/tfstate-context.tf b/deprecated/eks-iam/tfstate-context.tf similarity index 100% rename from modules/eks-iam/tfstate-context.tf rename to deprecated/eks-iam/tfstate-context.tf diff --git a/modules/eks-iam/tfstate.tf b/deprecated/eks-iam/tfstate.tf similarity index 100% rename from modules/eks-iam/tfstate.tf rename to deprecated/eks-iam/tfstate.tf diff --git a/modules/eks-iam/variables.tf b/deprecated/eks-iam/variables.tf old mode 100755 new mode 100644 similarity index 100% rename from modules/eks-iam/variables.tf rename to deprecated/eks-iam/variables.tf diff --git a/modules/eks-iam/versions.tf b/deprecated/eks-iam/versions.tf old mode 100755 new mode 100644 similarity index 80% rename from modules/eks-iam/versions.tf rename to deprecated/eks-iam/versions.tf index 4076d48ca..720538dd8 --- a/modules/eks-iam/versions.tf +++ b/deprecated/eks-iam/versions.tf @@ -7,8 +7,8 @@ terraform { version = ">= 3.0" } template = { - source = "hashicorp/template" - version = ">= 2.0" + source = "cloudposse/template" + version = ">= 2.2" } local = { source = "hashicorp/local" diff --git a/deprecated/eks/ebs-controller/README.md b/deprecated/eks/ebs-controller/README.md new file mode 100644 index 000000000..178de2cbf --- /dev/null +++ b/deprecated/eks/ebs-controller/README.md @@ -0,0 +1,122 @@ +# Component: `ebs-controller` + +This component creates a Helm release for `ebs-controller` on a Kubernetes cluster. + +## Usage + +**Stack Level**: Regional + +Once the catalog file is created, the file can be imported as follows. + +```yaml +import: + - catalog/eks/ebs-controller + ... +``` + +The default catalog values + +```yaml +components: + terraform: + eks/ebs-controller: + vars: + enabled: true + + # You can use `chart_values` to set any other chart options. Treat `chart_values` as the root of the doc. 
+ # + # # For example + # --- + # chart_values: + # enableShield: false + chart_values: {} +``` + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [helm](#requirement\_helm) | >= 2.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.7.1, != 2.21.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | +| [kubernetes](#provider\_kubernetes) | >= 2.7.1, != 2.21.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [ebs\_csi\_driver\_controller](#module\_ebs\_csi\_driver\_controller) | DrFaust92/ebs-csi-driver/kubernetes | 3.5.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [kubernetes_annotations.default_storage_class](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/annotations) | resource | +| [kubernetes_storage_class.gp3_enc](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/storage_class) | resource | +| [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to the `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [ebs\_csi\_controller\_image](#input\_ebs\_csi\_controller\_image) | The image to use for the EBS CSI controller | `string` | `"k8s.gcr.io/provider-aws/aws-ebs-csi-driver"` | no | +| [ebs\_csi\_driver\_version](#input\_ebs\_csi\_driver\_version) | The version of the EBS CSI driver | `string` | `"v1.6.2"` | no | +| [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | +| [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | +| [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | +| [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | +| [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | +| [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [ebs\_csi\_driver\_controller\_role\_arn](#output\_ebs\_csi\_driver\_controller\_role\_arn) | The Name of the EBS CSI driver controller IAM role ARN | +| [ebs\_csi\_driver\_controller\_role\_name](#output\_ebs\_csi\_driver\_controller\_role\_name) | The Name of the EBS CSI driver controller IAM role name | +| [ebs\_csi\_driver\_controller\_role\_policy\_arn](#output\_ebs\_csi\_driver\_controller\_role\_policy\_arn) | The Name of the EBS CSI driver controller IAM role policy ARN | +| [ebs\_csi\_driver\_controller\_role\_policy\_name](#output\_ebs\_csi\_driver\_controller\_role\_policy\_name) | The Name of the EBS CSI driver controller IAM role policy name | +| [ebs\_csi\_driver\_name](#output\_ebs\_csi\_driver\_name) | The Name of the EBS CSI driver | + + +## References + +- [aws-ebs-csi-driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/releases) + +[](https://cpco.io/component) diff --git a/modules/aurora-postgres/modules/postgresql-user/context.tf b/deprecated/eks/ebs-controller/context.tf similarity index 100% rename from modules/aurora-postgres/modules/postgresql-user/context.tf rename to deprecated/eks/ebs-controller/context.tf diff --git a/deprecated/eks/ebs-controller/main.tf b/deprecated/eks/ebs-controller/main.tf new file mode 100644 index 000000000..ac54cab12 --- /dev/null +++ b/deprecated/eks/ebs-controller/main.tf @@ -0,0 +1,56 @@ +locals { + enabled = module.this.enabled +} + +module "ebs_csi_driver_controller" { + count = local.enabled ? 1 : 0 + + # https://github.com/DrFaust92/terraform-kubernetes-ebs-csi-driver + source = "DrFaust92/ebs-csi-driver/kubernetes" + version = "3.5.0" + + ebs_csi_driver_version = var.ebs_csi_driver_version + ebs_csi_controller_image = var.ebs_csi_controller_image + ebs_csi_controller_role_name = "ebs-csi-${module.eks.outputs.cluster_shortname}" + ebs_csi_controller_role_policy_name_prefix = "ebs-csi-${module.eks.outputs.cluster_shortname}" + oidc_url = replace(module.eks.outputs.eks_cluster_identity_oidc_issuer, "https://", "") + enable_volume_resizing = true +} + +# Remove non encrypted default storage class +resource "kubernetes_annotations" "default_storage_class" { + count = local.enabled ? 1 : 0 + depends_on = [module.ebs_csi_driver_controller] + + api_version = "storage.k8s.io/v1" + kind = "StorageClass" + force = "true" + + metadata { + name = "gp2" + } + + annotations = { + "storageclass.kubernetes.io/is-default-class" = "false" + } +} + +# Create the new StorageClass and make it default +resource "kubernetes_storage_class" "gp3_enc" { + count = local.enabled ? 
1 : 0 + depends_on = [module.ebs_csi_driver_controller] + metadata { + name = "gp3-enc" + annotations = { + "storageclass.kubernetes.io/is-default-class" = "true" + } + } + storage_provisioner = "ebs.csi.aws.com" + volume_binding_mode = "WaitForFirstConsumer" + allow_volume_expansion = true + parameters = { + "encrypted" = "true" + "fsType" = "ext4" + "type" = "gp3" + } +} diff --git a/deprecated/eks/ebs-controller/outputs.tf b/deprecated/eks/ebs-controller/outputs.tf new file mode 100644 index 000000000..a30f9910f --- /dev/null +++ b/deprecated/eks/ebs-controller/outputs.tf @@ -0,0 +1,24 @@ +output "ebs_csi_driver_name" { + description = "The Name of the EBS CSI driver" + value = module.ebs_csi_driver_controller.ebs_csi_driver_name +} + +output "ebs_csi_driver_controller_role_arn" { + description = "The Name of the EBS CSI driver controller IAM role ARN" + value = module.ebs_csi_driver_controller.ebs_csi_driver_controller_role_arn +} + +output "ebs_csi_driver_controller_role_name" { + description = "The Name of the EBS CSI driver controller IAM role name" + value = module.ebs_csi_driver_controller.ebs_csi_driver_controller_role_name +} + +output "ebs_csi_driver_controller_role_policy_arn" { + description = "The Name of the EBS CSI driver controller IAM role policy ARN" + value = module.ebs_csi_driver_controller.ebs_csi_driver_controller_role_policy_arn +} + +output "ebs_csi_driver_controller_role_policy_name" { + description = "The Name of the EBS CSI driver controller IAM role policy name" + value = module.ebs_csi_driver_controller.ebs_csi_driver_controller_role_policy_name +} diff --git a/modules/eks/karpenter-provisioner/provider-helm.tf b/deprecated/eks/ebs-controller/provider-helm.tf similarity index 98% rename from modules/eks/karpenter-provisioner/provider-helm.tf rename to deprecated/eks/ebs-controller/provider-helm.tf index 9bb5edb6f..64459d4f4 100644 --- a/modules/eks/karpenter-provisioner/provider-helm.tf +++ b/deprecated/eks/ebs-controller/provider-helm.tf @@ -85,7 +85,7 @@ variable "kubeconfig_exec_auth_api_version" { variable "helm_manifest_experiment_enabled" { type = bool - default = true + default = false description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" } @@ -101,7 +101,7 @@ locals { "--profile", var.kube_exec_auth_aws_profile ] : [] - kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, var.import_role_arn, module.iam_roles.terraform_role_arn) + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ "--role-arn", local.kube_exec_auth_role_arn ] : [] diff --git a/deprecated/eks/ebs-controller/providers.tf b/deprecated/eks/ebs-controller/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/deprecated/eks/ebs-controller/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
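+    # Illustrative note (not in the original file): Terraform's compact() removes
+    # null and empty-string elements, so when terraform_role_arn is null the
+    # expression below yields an empty list, for_each iterates zero times, and
+    # no assume_role block is rendered at all. For example (hypothetical ARN):
+    #   compact([null])                                        => []
+    #   compact(["arn:aws:iam::123456789012:role/example"])    => one element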
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/datadog-agent/remote-state.tf b/deprecated/eks/ebs-controller/remote-state.tf similarity index 87% rename from modules/datadog-agent/remote-state.tf rename to deprecated/eks/ebs-controller/remote-state.tf index 6ef90fd26..ac55ba94c 100644 --- a/modules/datadog-agent/remote-state.tf +++ b/deprecated/eks/ebs-controller/remote-state.tf @@ -1,6 +1,6 @@ module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.4.1" component = var.eks_component_name diff --git a/deprecated/eks/ebs-controller/variables.tf b/deprecated/eks/ebs-controller/variables.tf new file mode 100644 index 000000000..e2e9313f0 --- /dev/null +++ b/deprecated/eks/ebs-controller/variables.tf @@ -0,0 +1,22 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "eks_component_name" { + type = string + description = "The name of the eks component" + default = "eks/cluster" +} + +variable "ebs_csi_driver_version" { + type = string + description = "The version of the EBS CSI driver" + default = "v1.6.2" +} + +variable "ebs_csi_controller_image" { + type = string + description = "The image to use for the EBS CSI controller" + default = "k8s.gcr.io/provider-aws/aws-ebs-csi-driver" +} diff --git a/deprecated/eks/ebs-controller/versions.tf b/deprecated/eks/ebs-controller/versions.tf new file mode 100644 index 000000000..fb8857fab --- /dev/null +++ b/deprecated/eks/ebs-controller/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.7.1, != 2.21.0" + } + } +} diff --git a/deprecated/eks/echo-server/README.md b/deprecated/eks/echo-server/README.md new file mode 100644 index 000000000..de7a28cec --- /dev/null +++ b/deprecated/eks/echo-server/README.md @@ -0,0 +1,161 @@ +# Component: `eks/echo-server` + +This is copied from [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/echo-server). + +This component installs the [Ealenn/Echo-Server](https://github.com/Ealenn/Echo-Server) to EKS clusters. +The echo server is a server that sends it back to the client a JSON representation of all the data +the server received, which is a combination of information sent by the client and information sent +by the web server infrastructure. For further details, please see [Echo-Server documentation](https://ealenn.github.io/Echo-Server/). + +## Prerequisites + +Echo server is intended to provide end-to-end testing of everything needed to deploy an application or service with a public HTTPS endpoint. +Therefore, it requires several other components. + +At the moment, it supports 2 configurations: + +1. ALB with ACM Certificate + - AWS Load Balancer Controller (ALB) version 2.2.0 or later, with ACM certificate auto-discovery enabled + - Pre-provisioned ACM TLS certificate covering the provisioned host name (typically a wildcard certificate covering all hosts in the domain) +2. Nginx with Cert Manager Certificate + - Nginx (via `kubernetes/ingress-nginx` controller). 
We recommend `ingress-nginx` v1.1.0 or later, but `echo-server` + should work with any version that supports Ingress API version `networking.k8s.io/v1`. + - `jetstack/cert-manager` configured to automatically (via Ingress Shim, installed by default) generate TLS certificates via a Cluster Issuer + (by default, named `letsEncrypt-prod`). + +In both configurations, it has these common requirements: +- Kubernetes version 1.19 or later +- Ingress API version `networking.k8s.io/v1` +- [kubernetes-sigs/external-dns](https://github.com/kubernetes-sigs/external-dns) +- A default IngressClass, either explicitly provisioned or supported without provisioning by the Ingress controller. + +## Warnings + +A Terraform plan may fail to apply, giving a Kubernetes authentication failure. This is due to a known issue with +Terraform and the Kubernetes provider. During the "plan" phase Terraform gets a short-lived Kubernetes +authentication token and caches it, and then tries to use it during "apply". If the token has expired by +the time you try to run "apply", the "apply" will fail. The workaround is to run `terraform apply -auto-approve` without +a "plan" file. + +## Usage + +**Stack Level**: Regional + +Use this in the catalog or use these variables to overwrite the catalog values. + +```yaml +components: + terraform: + eks/echo-server: + metadata: + component: eks/echo-server + settings: + spacelift: + workspace_enabled: true + vars: + enabled: true + name: "echo-server" + kubernetes_namespace: "echo" + description: "Echo server, for testing purposes" + create_namespace: true + timeout: 180 + wait: true + atomic: true + cleanup_on_fail: true + + ingress_type: "alb" + # %[1]v is the tenant name, %[2]v is the stage name, %[3]v is the region name + hostname_template: "echo.%[3]v.%[2]v.%[1]v.sample-domain.net" +``` + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [helm](#requirement\_helm) | >= 2.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.7.1, != 2.21.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [alb](#module\_alb) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [echo\_server](#module\_echo\_server) | cloudposse/helm-release/aws | 0.10.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [alb\_controller\_ingress\_group\_component\_name](#input\_alb\_controller\_ingress\_group\_component\_name) | The name of the alb\_controller\_ingress\_group component | `string` | `"eks/alb-controller-ingress-group"` | no | +| [atomic](#input\_atomic) | If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used. | `bool` | `true` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [chart\_values](#input\_chart\_values) | Additional map values to yamlencode as `helm_release` values. | `any` | `{}` | no | +| [chart\_version](#input\_chart\_version) | Specify the exact chart version to install. If this is not specified, the latest version is installed. | `string` | `null` | no | +| [cleanup\_on\_fail](#input\_cleanup\_on\_fail) | Allow deletion of new resources created in this upgrade when upgrade fails. | `bool` | `true` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [create\_namespace](#input\_create\_namespace) | Create the Kubernetes namespace if it does not yet exist | `bool` | `true` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [description](#input\_description) | Set release description attribute (visible in the history). | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | +| [hostname\_template](#input\_hostname\_template) | The `format()` string to use to generate the hostname via `format(var.hostname_template, var.tenant, var.stage, var.environment)`"
Typically something like `"echo.%[3]v.%[2]v.example.com"`. | `string` | n/a | yes | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [ingress\_type](#input\_ingress\_type) | Set to 'nginx' to create an ingress resource relying on an NGiNX backend for the echo-server service. Set to 'alb' to create an ingress resource relying on an AWS ALB backend for the echo-server service. Leave blank to not create any ingress for the echo-server service. | `string` | `null` | no | +| [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | +| [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | +| [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | +| [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | +| [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | +| [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | +| [kubernetes\_namespace](#input\_kubernetes\_namespace) | The namespace to install the release into. | `string` | n/a | yes | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [repository](#input\_repository) | Repository URL where to locate the requested chart. | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [timeout](#input\_timeout) | Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds | `number` | `null` | no | +| [verify](#input\_verify) | Verify the package before installing it. Helm uses a provenance file to verify the integrity of the chart; this must be hosted alongside the chart | `bool` | `false` | no | +| [wait](#input\_wait) | Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`. | `bool` | `true` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [metadata](#output\_metadata) | Block status of the deployed release | + + +## References +* https://github.com/Ealenn/Echo-Server diff --git a/deprecated/eks/echo-server/charts/echo-server/.helmignore b/deprecated/eks/echo-server/charts/echo-server/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/deprecated/eks/echo-server/charts/echo-server/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deprecated/eks/echo-server/charts/echo-server/Chart.yaml b/deprecated/eks/echo-server/charts/echo-server/Chart.yaml new file mode 100644 index 000000000..24c16905b --- /dev/null +++ b/deprecated/eks/echo-server/charts/echo-server/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: "echo-server" +description: A server that replicates the request sent by the client and sends it back. + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.3.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "0.3.0" diff --git a/deprecated/eks/echo-server/charts/echo-server/templates/NOTES.txt b/deprecated/eks/echo-server/charts/echo-server/templates/NOTES.txt new file mode 100644 index 000000000..c2e6e75b1 --- /dev/null +++ b/deprecated/eks/echo-server/charts/echo-server/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. 
Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "echo-server.name" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "echo-server.name" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "echo-server.name" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "echo-server.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/deprecated/eks/echo-server/charts/echo-server/templates/_helpers.tpl b/deprecated/eks/echo-server/charts/echo-server/templates/_helpers.tpl new file mode 100644 index 000000000..6ac8f57cb --- /dev/null +++ b/deprecated/eks/echo-server/charts/echo-server/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "echo-server.name" -}} + {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "echo-server.fullname" -}} + {{- if .Values.fullnameOverride }} + {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} + {{- else }} + {{- $name := default .Chart.Name .Values.nameOverride }} + {{- if contains $name .Release.Name }} + {{- .Release.Name | trunc 63 | trimSuffix "-" }} + {{- else }} + {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} + {{- end }} + {{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "echo-server.chart" -}} + {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels + helm.sh/chart: {{ include "echo-server.chart" . }} + {{- if .Chart.AppVersion }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + {{- end }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +*/}} +{{- define "echo-server.labels" -}} + {{ include "echo-server.selectorLabels" . }} +{{- end }} + +{{/* +Selector labels + app.kubernetes.io/name: {{ include "echo-server.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} +*/}} +{{- define "echo-server.selectorLabels" -}} + app: {{ include "echo-server.fullname" . }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "echo-server.serviceAccountName" -}} + {{- if .Values.serviceAccount.create }} + {{- default (include "echo-server.fullname" .) .Values.serviceAccount.name }} + {{- else }} + {{- default "default" .Values.serviceAccount.name }} + {{- end }} +{{- end }} diff --git a/deprecated/eks/echo-server/charts/echo-server/templates/deployment.yaml b/deprecated/eks/echo-server/charts/echo-server/templates/deployment.yaml new file mode 100644 index 000000000..1e85f1c36 --- /dev/null +++ b/deprecated/eks/echo-server/charts/echo-server/templates/deployment.yaml @@ -0,0 +1,30 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "echo-server.fullname" . }} + labels: + {{- include "echo-server.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "echo-server.selectorLabels" . | nindent 6 }} + template: + metadata: + name: {{ include "echo-server.fullname" . }} + labels: + {{- include "echo-server.selectorLabels" . | nindent 8 }} + spec: + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + # Disable the feature that turns the echo server into a file browser on the server (security risk) + - "--enable:file=false" + ports: + - name: http + containerPort: 80 + protocol: TCP diff --git a/deprecated/eks/echo-server/charts/echo-server/templates/ingress.yaml b/deprecated/eks/echo-server/charts/echo-server/templates/ingress.yaml new file mode 100644 index 000000000..86c8bb577 --- /dev/null +++ b/deprecated/eks/echo-server/charts/echo-server/templates/ingress.yaml @@ -0,0 +1,57 @@ +{{- if or (eq (printf "%v" .Values.ingress.nginx.enabled) "true") (eq (printf "%v" .Values.ingress.alb.enabled) "true") -}} + {{- $fullName := include "echo-server.fullname" . -}} + {{- $svcName := include "echo-server.name" . 
-}} + {{- $svcPort := .Values.service.port -}} + {{- $nginxTlsEnabled := and (eq (printf "%v" .Values.ingress.nginx.enabled) "true") (eq (printf "%v" .Values.tlsEnabled) "true")}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ $fullName }} + annotations: + {{- if eq (printf "%v" .Values.ingress.nginx.enabled) "true" }} + kubernetes.io/ingress.class: {{ .Values.ingress.nginx.class }} + {{- if (index .Values.ingress.nginx "tls_certificate_cluster_issuer") }} + cert-manager.io/cluster-issuer: {{ .Values.ingress.nginx.tls_certificate_cluster_issuer }} + {{- end }} + {{- else if eq (printf "%v" .Values.ingress.alb.enabled) "true" }} + kubernetes.io/ingress.class: {{ .Values.ingress.alb.class }} + {{- if not .Values.ingress.alb.group_name }} + alb.ingress.kubernetes.io/load-balancer-name: {{ index .Values.ingress.alb "load_balancer_name" | default "k8s-common" }} + {{- end }} + alb.ingress.kubernetes.io/group.name: {{ index .Values.ingress.alb "group_name" | default "common" }} + alb.ingress.kubernetes.io/scheme: {{ index .Values.ingress.alb "scheme" | default "internet-facing" }} + {{- if .Values.ingress.alb.access_logs.enabled }} + alb.ingress.kubernetes.io/load-balancer-attributes: access_logs.s3.enabled=true,access_logs.s3.bucket={{.Values.ingress.alb.access_logs.s3_bucket_name}},access_logs.s3.prefix={{.Values.ingress.alb.access_logs.s3_bucket_prefix}} + {{- end }} + alb.ingress.kubernetes.io/target-type: 'ip' + {{- if eq (printf "%v" .Values.ingress.alb.ssl_redirect.enabled) "true" }} + alb.ingress.kubernetes.io/ssl-redirect: '{{ .Values.ingress.alb.ssl_redirect.port }}' + {{- end }} + {{- if eq (printf "%v" .Values.tlsEnabled) "true" }} + alb.ingress.kubernetes.io/backend-protocol: HTTP + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80},{"HTTPS":443}]' + {{- else }} + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}]' + {{- end }} + {{- end }} + labels: + {{- include "echo-server.labels" . | nindent 4 }} +spec: + {{- if $nginxTlsEnabled }} + tls: # < placing a host in the TLS config will indicate a certificate should be created + - hosts: + - {{ .Values.ingress.hostname }} + secretName: {{ $svcName }}-cert # < cert-manager will store the created certificate in this secret. + {{- end }} + rules: + - host: {{ .Values.ingress.hostname }} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} +{{- end }} diff --git a/deprecated/eks/echo-server/charts/echo-server/templates/service.yaml b/deprecated/eks/echo-server/charts/echo-server/templates/service.yaml new file mode 100644 index 000000000..014977e05 --- /dev/null +++ b/deprecated/eks/echo-server/charts/echo-server/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "echo-server.name" . }} + labels: + {{- include "echo-server.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "echo-server.selectorLabels" . | nindent 4 }} diff --git a/deprecated/eks/echo-server/charts/echo-server/values.yaml b/deprecated/eks/echo-server/charts/echo-server/values.yaml new file mode 100644 index 000000000..777654c4d --- /dev/null +++ b/deprecated/eks/echo-server/charts/echo-server/values.yaml @@ -0,0 +1,96 @@ +# Default values for echo-server. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 + +image: + # image.repository -- https://hub.docker.com/r/ealen/echo-server + repository: ealen/echo-server + # image.tag -- https://github.com/Ealenn/Echo-Server/releases + tag: 0.4.2 + pullPolicy: Always + +#imagePullSecrets: [] +nameOverride: "" +#fullnameOverride: "" + +#serviceAccount: +# # Specifies whether a service account should be created +# create: true +# # Annotations to add to the service account +# annotations: {} +# # The name of the service account to use. +# # If not set and create is true, a name is generated using the fullname template +# name: "" + +#podAnnotations: {} + +#podSecurityContext: {} +# # fsGroup: 2000 + +#securityContext: {} +# # capabilities: +# # drop: +# # - ALL +# # readOnlyRootFilesystem: true +# # runAsNonRoot: true +# # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +tlsEnabled: true + +ingress: + nginx: + # ingress.nginx.enabled -- Enable NGiNX ingress + enabled: false + # annotation values + ## kubernetes.io/ingress.class: + class: "nginx" + ## cert-manager.io/cluster-issuer: + tls_certificate_cluster_issuer: "letsencrypt-prod" + alb: + enabled: true + # annotation values + ## kubernetes.io/ingress.class: + class: "alb" + ## alb.ingress.kubernetes.io/load-balancer-name: + ### load_balancer_name: "k8s-common" + ## alb.ingress.kubernetes.io/group.name: + ### group_name: "common" + ssl_redirect: + enabled: true + ## alb.ingress.kubernetes.io/ssl-redirect: + port: 443 + access_logs: + enabled: false + ## s3_bucket_name: "acme-ue2-prod-eks-cluster-alb-access-logs" + s3_bucket_prefix: "echo-server" + +#resources: {} +# # We usually recommend not to specify default resources and to leave this as a conscious +# # choice for the user. This also increases chances charts run on environments with little +# # resources, such as Minikube. If you do want to specify resources, uncomment the following +# # lines, adjust them as necessary, and remove the curly braces after 'resources:'. +# # limits: +# # cpu: 100m +# # memory: 128Mi +# # requests: +# # cpu: 100m +# # memory: 128Mi + +autoscaling: + enabled: false + #minReplicas: 1 + #maxReplicas: 100 + #targetCPUUtilizationPercentage: 80 + #targetMemoryUtilizationPercentage: 80 + +#nodeSelector: {} + +#tolerations: [] + +#affinity: {} diff --git a/modules/datadog-agent/context.tf b/deprecated/eks/echo-server/context.tf similarity index 100% rename from modules/datadog-agent/context.tf rename to deprecated/eks/echo-server/context.tf diff --git a/deprecated/eks/echo-server/helm-variables.tf b/deprecated/eks/echo-server/helm-variables.tf new file mode 100644 index 000000000..53ca0364a --- /dev/null +++ b/deprecated/eks/echo-server/helm-variables.tf @@ -0,0 +1,72 @@ +variable "kubernetes_namespace" { + type = string + description = "The namespace to install the release into." +} + +variable "description" { + type = string + description = "Set release description attribute (visible in the history)." + default = null +} + +variable "repository" { + type = string + description = "Repository URL where to locate the requested chart." + default = null +} + +variable "chart_version" { + type = string + description = "Specify the exact chart version to install. If this is not specified, the latest version is installed." + default = null +} + +variable "create_namespace" { + type = bool + description = "Create the Kubernetes namespace if it does not yet exist" + default = true +} + +variable "verify" { + type = bool + description = "Verify the package before installing it. 
Helm uses a provenance file to verify the integrity of the chart; this must be hosted alongside the chart" + default = false +} + +variable "wait" { + type = bool + description = "Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`." + default = true +} + +variable "atomic" { + type = bool + description = "If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used." + default = true +} + +variable "cleanup_on_fail" { + type = bool + description = "Allow deletion of new resources created in this upgrade when upgrade fails." + default = true +} + +variable "timeout" { + type = number + description = "Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds" + default = null +} + +variable "ingress_type" { + type = string + default = null + description = "Set to 'nginx' to create an ingress resource relying on an NGiNX backend for the echo-server service. Set to 'alb' to create an ingress resource relying on an AWS ALB backend for the echo-server service. Leave blank to not create any ingress for the echo-server service." +} + +variable "hostname_template" { + type = string + description = <<-EOT + The `format()` string to use to generate the hostname via `format(var.hostname_template, var.tenant, var.stage, var.environment)`" + Typically something like `"echo.%[3]v.%[2]v.example.com"`. + EOT +} diff --git a/deprecated/eks/echo-server/main.tf b/deprecated/eks/echo-server/main.tf new file mode 100644 index 000000000..b58d0dca2 --- /dev/null +++ b/deprecated/eks/echo-server/main.tf @@ -0,0 +1,63 @@ +locals { + ingress_nginx_enabled = var.ingress_type == "nginx" ? true : false + ingress_alb_enabled = var.ingress_type == "alb" ? true : false +} + +module "echo_server" { + source = "cloudposse/helm-release/aws" + version = "0.10.0" + + name = module.this.name + chart = "${path.module}/charts/echo-server" + + # Optional arguments + description = var.description + repository = var.repository + chart_version = var.chart_version + verify = var.verify + wait = var.wait + atomic = var.atomic + cleanup_on_fail = var.cleanup_on_fail + timeout = var.timeout + + create_namespace_with_kubernetes = var.create_namespace + kubernetes_namespace = var.kubernetes_namespace + kubernetes_namespace_labels = merge(module.this.tags, { name = var.kubernetes_namespace }) + + eks_cluster_oidc_issuer_url = replace(module.eks.outputs.eks_cluster_identity_oidc_issuer, "https://", "") + + set = [ + { + name = "ingress.hostname" + value = format(var.hostname_template, var.tenant, var.stage, var.environment) + type = "auto" + }, + { + name = "ingress.nginx.enabled" + value = local.ingress_nginx_enabled + type = "auto" + }, + { + name = "ingress.alb.group_name" + value = module.alb.outputs.group_name + type = "auto" + }, + { + name = "ingress.alb.enabled" + value = local.ingress_alb_enabled + type = "auto" + }, + { + name = "ingress.alb.scheme" + value = module.alb.outputs.load_balancer_scheme + type = "auto" + }, + ] + + values = compact([ + # additional values + try(length(var.chart_values), 0) == 0 ? 
null : yamlencode(var.chart_values) + ]) + + context = module.this.context +} diff --git a/deprecated/eks/echo-server/outputs.tf b/deprecated/eks/echo-server/outputs.tf new file mode 100644 index 000000000..3199457ce --- /dev/null +++ b/deprecated/eks/echo-server/outputs.tf @@ -0,0 +1,4 @@ +output "metadata" { + value = try(one(module.echo_server.metadata), null) + description = "Block status of the deployed release" +} diff --git a/modules/eks/efs-controller/provider-helm.tf b/deprecated/eks/echo-server/provider-helm.tf similarity index 80% rename from modules/eks/efs-controller/provider-helm.tf rename to deprecated/eks/echo-server/provider-helm.tf index 20e4d3837..64459d4f4 100644 --- a/modules/eks/efs-controller/provider-helm.tf +++ b/deprecated/eks/echo-server/provider-helm.tf @@ -2,6 +2,12 @@ # # This file is a drop-in to provide a helm provider. # +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# # All the following variables are just about configuring the Kubernetes provider # to be able to modify EKS cluster. The reason there are so many options is # because at various times, each one of them has had problems, so we give you a choice. @@ -79,7 +85,7 @@ variable "kubeconfig_exec_auth_api_version" { variable "helm_manifest_experiment_enabled" { type = bool - default = true + default = false description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" } @@ -95,14 +101,16 @@ locals { "--profile", var.kube_exec_auth_aws_profile ] : [] - kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, var.import_role_arn, module.iam_roles.terraform_role_arn) + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ "--role-arn", local.kube_exec_auth_role_arn ] : [] - certificate_authority_data = module.eks.outputs.eks_cluster_certificate_authority_data - eks_cluster_id = module.eks.outputs.eks_cluster_id - eks_cluster_endpoint = module.eks.outputs.eks_cluster_endpoint + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = try(module.eks.outputs.eks_cluster_certificate_authority_data, "") + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = try(module.eks.outputs.eks_cluster_endpoint, "") } data "aws_eks_cluster_auth" "eks" { @@ -114,14 +122,14 @@ provider "helm" { kubernetes { host = local.eks_cluster_endpoint cluster_ca_certificate = base64decode(local.certificate_authority_data) - token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" config_context = var.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled ? 
["exec"] : [] + for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" @@ -132,21 +140,21 @@ provider "helm" { } } experiments { - manifest = var.helm_manifest_experiment_enabled + manifest = var.helm_manifest_experiment_enabled && module.this.enabled } } provider "kubernetes" { host = local.eks_cluster_endpoint cluster_ca_certificate = base64decode(local.certificate_authority_data) - token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" config_context = var.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" diff --git a/deprecated/eks/echo-server/providers.tf b/deprecated/eks/echo-server/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/deprecated/eks/echo-server/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/deprecated/eks/echo-server/remote-state.tf b/deprecated/eks/echo-server/remote-state.tf new file mode 100644 index 000000000..a31ae6bb6 --- /dev/null +++ b/deprecated/eks/echo-server/remote-state.tf @@ -0,0 +1,17 @@ +module "eks" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.eks_component_name + + context = module.this.context +} + +module "alb" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.alb_controller_ingress_group_component_name + + context = module.this.context +} diff --git a/deprecated/eks/echo-server/variables.tf b/deprecated/eks/echo-server/variables.tf new file mode 100644 index 000000000..48da55209 --- /dev/null +++ b/deprecated/eks/echo-server/variables.tf @@ -0,0 +1,22 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "eks_component_name" { + type = string + description = "The name of the eks component" + default = "eks/cluster" +} + +variable "alb_controller_ingress_group_component_name" { + type = string + description = "The name of the alb_controller_ingress_group component" + default = "eks/alb-controller-ingress-group" +} + +variable "chart_values" { + type = any + description = "Addition map values to yamlencode as `helm_release` values." 
+ default = {} +} diff --git a/deprecated/eks/echo-server/versions.tf b/deprecated/eks/echo-server/versions.tf new file mode 100644 index 000000000..fb8857fab --- /dev/null +++ b/deprecated/eks/echo-server/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.7.1, != 2.21.0" + } + } +} diff --git a/modules/eks/efs-controller/README.md b/deprecated/eks/efs-controller/README.md similarity index 95% rename from modules/eks/efs-controller/README.md rename to deprecated/eks/efs-controller/README.md index 4c5d146a8..c6c495c7a 100644 --- a/modules/eks/efs-controller/README.md +++ b/deprecated/eks/efs-controller/README.md @@ -44,23 +44,24 @@ components: | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | | [helm](#requirement\_helm) | >= 2.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0, != 2.21.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | -| [kubernetes](#provider\_kubernetes) | n/a | +| [aws](#provider\_aws) | >= 4.0 | +| [kubernetes](#provider\_kubernetes) | >= 2.0, != 2.21.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [efs](#module\_efs) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | -| [efs\_controller](#module\_efs\_controller) | cloudposse/helm-release/aws | 0.5.0 | -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [efs](#module\_efs) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | +| [efs\_controller](#module\_efs\_controller) | cloudposse/helm-release/aws | 0.9.1 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -92,10 +93,8 @@ components: | [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `true` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | | [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | diff --git a/modules/eks/efs-controller/context.tf b/deprecated/eks/efs-controller/context.tf similarity index 100% rename from modules/eks/efs-controller/context.tf rename to deprecated/eks/efs-controller/context.tf diff --git a/modules/eks/efs-controller/main.tf b/deprecated/eks/efs-controller/main.tf similarity index 95% rename from modules/eks/efs-controller/main.tf rename to deprecated/eks/efs-controller/main.tf index 9a32566c0..0867d5ccf 100644 --- a/modules/eks/efs-controller/main.tf +++ b/deprecated/eks/efs-controller/main.tf @@ -14,7 +14,7 @@ resource "kubernetes_namespace" "default" { module "efs_controller" { source = "cloudposse/helm-release/aws" - version = "0.5.0" + version = "0.9.1" name = var.name chart = var.chart @@ -50,7 +50,7 @@ module "efs_controller" { # annotations: # storageclass.kubernetes.io/is-default-class: "true" parameters = { - fileSystemId = module.efs.outputs.efs_id + fileSystemId = local.enabled ? module.efs.outputs.efs_id : "" provisioningMode = "efs-ap" directoryPerms = "700" basePath = "/efs_controller" diff --git a/modules/eks/efs-controller/outputs.tf b/deprecated/eks/efs-controller/outputs.tf similarity index 100% rename from modules/eks/efs-controller/outputs.tf rename to deprecated/eks/efs-controller/outputs.tf diff --git a/deprecated/eks/efs-controller/provider-helm.tf b/deprecated/eks/efs-controller/provider-helm.tf new file mode 100644 index 000000000..64459d4f4 --- /dev/null +++ b/deprecated/eks/efs-controller/provider-helm.tf @@ -0,0 +1,166 @@ +################## +# +# This file is a drop-in to provide a helm provider. +# +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# +# All the following variables are just about configuring the Kubernetes provider +# to be able to modify EKS cluster. The reason there are so many options is +# because at various times, each one of them has had problems, so we give you a choice. +# +# The reason there are so many "enabled" inputs rather than automatically +# detecting whether or not they are enabled based on the value of the input +# is that any logic based on input values requires the values to be known during +# the "plan" phase of Terraform, and often they are not, which causes problems. 
+# +variable "kubeconfig_file_enabled" { + type = bool + default = false + description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" +} + +variable "kubeconfig_file" { + type = string + default = "" + description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" +} + +variable "kubeconfig_context" { + type = string + default = "" + description = "Context to choose from the Kubernetes kube config file" +} + +variable "kube_data_auth_enabled" { + type = bool + default = false + description = <<-EOT + If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. + EOT +} + +variable "kube_exec_auth_enabled" { + type = bool + default = true + description = <<-EOT + If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. + EOT +} + +variable "kube_exec_auth_role_arn" { + type = string + default = "" + description = "The role ARN for `aws eks get-token` to use" +} + +variable "kube_exec_auth_role_arn_enabled" { + type = bool + default = true + description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" +} + +variable "kube_exec_auth_aws_profile" { + type = string + default = "" + description = "The AWS config profile for `aws eks get-token` to use" +} + +variable "kube_exec_auth_aws_profile_enabled" { + type = bool + default = false + description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" +} + +variable "kubeconfig_exec_auth_api_version" { + type = string + default = "client.authentication.k8s.io/v1beta1" + description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" +} + +variable "helm_manifest_experiment_enabled" { + type = bool + default = false + description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" +} + +locals { + kubeconfig_file_enabled = var.kubeconfig_file_enabled + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + + # Eventually we might try to get this from an environment variable + kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version + + exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? [ + "--profile", var.kube_exec_auth_aws_profile + ] : [] + + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) + exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ + "--role-arn", local.kube_exec_auth_role_arn + ] : [] + + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = try(module.eks.outputs.eks_cluster_certificate_authority_data, "") + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = try(module.eks.outputs.eks_cluster_endpoint, "") +} + +data "aws_eks_cluster_auth" "eks" { + count = local.kube_data_auth_enabled ? 
1 : 0 + name = local.eks_cluster_id +} + +provider "helm" { + kubernetes { + host = local.eks_cluster_endpoint + cluster_ca_certificate = base64decode(local.certificate_authority_data) + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster + # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. + config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + config_context = var.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } + } + experiments { + manifest = var.helm_manifest_experiment_enabled && module.this.enabled + } +} + +provider "kubernetes" { + host = local.eks_cluster_endpoint + cluster_ca_certificate = base64decode(local.certificate_authority_data) + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster + # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. + config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + config_context = var.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } +} diff --git a/deprecated/eks/efs-controller/providers.tf b/deprecated/eks/efs-controller/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/deprecated/eks/efs-controller/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/eks/efs-controller/remote-state.tf b/deprecated/eks/efs-controller/remote-state.tf similarity index 87% rename from modules/eks/efs-controller/remote-state.tf rename to deprecated/eks/efs-controller/remote-state.tf index 9e0f8f81b..cedf32782 100644 --- a/modules/eks/efs-controller/remote-state.tf +++ b/deprecated/eks/efs-controller/remote-state.tf @@ -1,6 +1,6 @@ module "efs" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.4.1" component = var.efs_component_name @@ -9,10 +9,9 @@ module "efs" { module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.4.1" component = var.eks_component_name context = module.this.context } - diff --git a/modules/eks/efs-controller/resources/iam_policy_statements.yaml b/deprecated/eks/efs-controller/resources/iam_policy_statements.yaml similarity index 82% rename from modules/eks/efs-controller/resources/iam_policy_statements.yaml rename to deprecated/eks/efs-controller/resources/iam_policy_statements.yaml index d56fbba86..6cd70e71e 100644 --- a/modules/eks/efs-controller/resources/iam_policy_statements.yaml +++ b/deprecated/eks/efs-controller/resources/iam_policy_statements.yaml @@ -5,14 +5,17 @@ AllowEFSDescribeOnAllResources: actions: - elasticfilesystem:DescribeAccessPoints - elasticfilesystem:DescribeFileSystems + - elasticfilesystem:DescribeMountTargets + - ec2:DescribeAvailabilityZones resources: ["*"] conditions: [] -AllowConditionalEFSCreateAccessPoint: - sid: "AllowConditionalEFSCreateAccessPoint" +AllowConditionalEFSAccess: + sid: "AllowConditionalEFSAccess" effect: Allow actions: - elasticfilesystem:CreateAccessPoint + - elasticfilesystem:TagResource resources: ["*"] conditions: - test: "StringLike" diff --git a/modules/eks/efs-controller/resources/values.yaml b/deprecated/eks/efs-controller/resources/values.yaml similarity index 61% rename from modules/eks/efs-controller/resources/values.yaml rename to deprecated/eks/efs-controller/resources/values.yaml index 48dd69eb0..b3fa9c76b 100644 --- a/modules/eks/efs-controller/resources/values.yaml +++ b/deprecated/eks/efs-controller/resources/values.yaml @@ -1,3 +1,3 @@ controller: serviceAccount: - create: "true" \ No newline at end of file + create: "true" diff --git a/modules/eks/efs-controller/variables.tf b/deprecated/eks/efs-controller/variables.tf similarity index 100% rename from modules/eks/efs-controller/variables.tf rename to deprecated/eks/efs-controller/variables.tf diff --git a/modules/datadog-agent/versions.tf b/deprecated/eks/efs-controller/versions.tf similarity index 55% rename from modules/datadog-agent/versions.tf rename to deprecated/eks/efs-controller/versions.tf index 19dd8f964..14c085342 100644 --- a/modules/datadog-agent/versions.tf +++ b/deprecated/eks/efs-controller/versions.tf @@ -4,15 +4,15 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } helm = { source = "hashicorp/helm" - version = ">= 2.3.0" + version = ">= 2.0" } - utils = { - source = "cloudposse/utils" - version = ">= 0.3.0" + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0, != 2.21.0" } } } diff --git a/modules/eks/eks-without-spotinst/README.md 
b/deprecated/eks/eks-without-spotinst/README.md similarity index 97% rename from modules/eks/eks-without-spotinst/README.md rename to deprecated/eks/eks-without-spotinst/README.md index 82e37ac70..3f0e2344a 100644 --- a/modules/eks/eks-without-spotinst/README.md +++ b/deprecated/eks/eks-without-spotinst/README.md @@ -6,10 +6,10 @@ NOTE: This component can only be deployed after logging in to AWS via Federated If Spotinst is going to be used, the following course of action needs to be followed: 1. Create Spotinst account and subscribe to a Business Plan. -1. Provision [spotinst-integration](../spotinst-integration), as documented in the component. +1. Provision [spotinst-integration](https://spot.io/), as documented in the component. 1. Provision EKS with Spotinst Ocean pool only. -1. Deploy core K8s components, including [metrics-server](../metrics-server), [external-dns](../external-dns), etc. -1. Deploy Spotinst [ocean-controller](../ocean-controller). +1. Deploy core K8s components, including [metrics-server](https://docs.cloudposse.com/components/library/aws/eks/metrics-server), [external-dns](https://docs.cloudposse.com/components/library/aws/eks/external-dns), etc. +1. Deploy Spotinst [ocean-controller](https://docs.spot.io/ocean/tutorials/spot-kubernetes-controller/). ## Usage @@ -64,7 +64,7 @@ components: | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 3.0 | +| [aws](#requirement\_aws) | >= 3.0 | ## Providers @@ -74,15 +74,15 @@ No providers. | Name | Source | Version | |------|--------|---------| -| [delegated\_roles](#module\_delegated\_roles) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.0 | -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.0 | +| [delegated\_roles](#module\_delegated\_roles) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | | [eks\_cluster](#module\_eks\_cluster) | cloudposse/eks-cluster/aws | 0.44.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | -| [primary\_roles](#module\_primary\_roles) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.0 | +| [primary\_roles](#module\_primary\_roles) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | | [region\_node\_group](#module\_region\_node\_group) | ./modules/node_group_by_region | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.0 | -| [vpc\_ingress](#module\_vpc\_ingress) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | +| [vpc\_ingress](#module\_vpc\_ingress) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | ## Resources @@ -126,7 +126,6 @@ No resources. | [iam\_primary\_roles\_tenant\_name](#input\_iam\_primary\_roles\_tenant\_name) | The name of the tenant where the IAM primary roles are provisioned | `string` | `null` | no | | [iam\_roles\_environment\_name](#input\_iam\_roles\_environment\_name) | The name of the environment where the IAM roles are provisioned | `string` | `"gbl"` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kubeconfig\_file](#input\_kubeconfig\_file) | Name of `kubeconfig` file to use to configure Kubernetes provider | `string` | `""` | no | | [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | Set true to configure Kubernetes provider with a `kubeconfig` file specified by `kubeconfig_file`.
Mainly for when the standard configuration produces a Terraform error. | `bool` | `false` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | diff --git a/modules/eks/efs/context.tf b/deprecated/eks/eks-without-spotinst/context.tf similarity index 100% rename from modules/eks/efs/context.tf rename to deprecated/eks/eks-without-spotinst/context.tf diff --git a/modules/acm/default.auto.tfvars b/deprecated/eks/eks-without-spotinst/default.auto.tfvars similarity index 100% rename from modules/acm/default.auto.tfvars rename to deprecated/eks/eks-without-spotinst/default.auto.tfvars diff --git a/modules/eks/eks-without-spotinst/eks-node-groups.tf b/deprecated/eks/eks-without-spotinst/eks-node-groups.tf similarity index 100% rename from modules/eks/eks-without-spotinst/eks-node-groups.tf rename to deprecated/eks/eks-without-spotinst/eks-node-groups.tf diff --git a/modules/eks/eks-without-spotinst/main.tf b/deprecated/eks/eks-without-spotinst/main.tf similarity index 99% rename from modules/eks/eks-without-spotinst/main.tf rename to deprecated/eks/eks-without-spotinst/main.tf index 866ff4ee9..369edc235 100644 --- a/modules/eks/eks-without-spotinst/main.tf +++ b/deprecated/eks/eks-without-spotinst/main.tf @@ -56,7 +56,7 @@ module "eks_cluster" { # exec_auth is more reliable than data_auth when the aws CLI is available # Details at https://github.com/cloudposse/terraform-aws-eks-cluster/releases/tag/0.42.0 kube_exec_auth_enabled = !var.kubeconfig_file_enabled - # If using `exec` method (recommended) for authentication, provide an explict + # If using `exec` method (recommended) for authentication, provide an explicit # IAM role ARN to exec as for authentication to EKS cluster. kube_exec_auth_role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) kube_exec_auth_role_arn_enabled = true @@ -125,4 +125,3 @@ module "eks_cluster" { context = module.this.context } - diff --git a/modules/eks/eks-without-spotinst/context.tf b/deprecated/eks/eks-without-spotinst/modules/node_group_by_az/context.tf similarity index 100% rename from modules/eks/eks-without-spotinst/context.tf rename to deprecated/eks/eks-without-spotinst/modules/node_group_by_az/context.tf diff --git a/modules/eks/eks-without-spotinst/modules/node_group_by_az/main.tf b/deprecated/eks/eks-without-spotinst/modules/node_group_by_az/main.tf similarity index 93% rename from modules/eks/eks-without-spotinst/modules/node_group_by_az/main.tf rename to deprecated/eks/eks-without-spotinst/modules/node_group_by_az/main.tf index cc965d431..939007d37 100644 --- a/modules/eks/eks-without-spotinst/modules/node_group_by_az/main.tf +++ b/deprecated/eks/eks-without-spotinst/modules/node_group_by_az/main.tf @@ -1,16 +1,19 @@ -data "aws_subnet_ids" "private" { +data "aws_subnets" "private" { count = local.enabled ? 1 : 0 - vpc_id = var.cluster_context.vpc_id - - tags = { - (var.cluster_context.subnet_type_tag_key) = "private" + filter { + name = "vpc-id" + values = [var.cluster_context.vpc_id] } filter { name = "availability-zone" values = [var.availability_zone] } + + tags = { + (var.cluster_context.subnet_type_tag_key) = "private" + } } module "az_abbreviation" { @@ -21,7 +24,7 @@ module "az_abbreviation" { locals { enabled = module.this.enabled && length(var.availability_zone) > 0 sentinel = "~~" - subnet_ids_test = coalescelist(flatten(data.aws_subnet_ids.private[*].ids), [local.sentinel]) + subnet_ids_test = coalescelist(flatten(data.aws_subnets.private[*].ids), [local.sentinel]) subnet_ids = local.subnet_ids_test[0] == local.sentinel ? 
null : local.subnet_ids_test az_map = var.cluster_context.az_abbreviation_type == "short" ? module.az_abbreviation.region_az_alt_code_maps.to_short : module.az_abbreviation.region_az_alt_code_maps.to_fixed az_attribute = local.az_map[var.availability_zone] diff --git a/modules/eks/eks-without-spotinst/modules/node_group_by_az/outputs.tf b/deprecated/eks/eks-without-spotinst/modules/node_group_by_az/outputs.tf similarity index 100% rename from modules/eks/eks-without-spotinst/modules/node_group_by_az/outputs.tf rename to deprecated/eks/eks-without-spotinst/modules/node_group_by_az/outputs.tf diff --git a/modules/eks/eks-without-spotinst/modules/node_group_by_az/variables.tf b/deprecated/eks/eks-without-spotinst/modules/node_group_by_az/variables.tf similarity index 100% rename from modules/eks/eks-without-spotinst/modules/node_group_by_az/variables.tf rename to deprecated/eks/eks-without-spotinst/modules/node_group_by_az/variables.tf diff --git a/modules/eks/eks-without-spotinst/modules/node_group_by_az/context.tf b/deprecated/eks/eks-without-spotinst/modules/node_group_by_region/context.tf similarity index 100% rename from modules/eks/eks-without-spotinst/modules/node_group_by_az/context.tf rename to deprecated/eks/eks-without-spotinst/modules/node_group_by_region/context.tf diff --git a/modules/eks/eks-without-spotinst/modules/node_group_by_region/main.tf b/deprecated/eks/eks-without-spotinst/modules/node_group_by_region/main.tf similarity index 100% rename from modules/eks/eks-without-spotinst/modules/node_group_by_region/main.tf rename to deprecated/eks/eks-without-spotinst/modules/node_group_by_region/main.tf diff --git a/modules/eks/eks-without-spotinst/modules/node_group_by_region/outputs.tf b/deprecated/eks/eks-without-spotinst/modules/node_group_by_region/outputs.tf similarity index 100% rename from modules/eks/eks-without-spotinst/modules/node_group_by_region/outputs.tf rename to deprecated/eks/eks-without-spotinst/modules/node_group_by_region/outputs.tf diff --git a/modules/eks/eks-without-spotinst/modules/node_group_by_region/variables.tf b/deprecated/eks/eks-without-spotinst/modules/node_group_by_region/variables.tf similarity index 100% rename from modules/eks/eks-without-spotinst/modules/node_group_by_region/variables.tf rename to deprecated/eks/eks-without-spotinst/modules/node_group_by_region/variables.tf diff --git a/modules/eks/eks-without-spotinst/outputs.tf b/deprecated/eks/eks-without-spotinst/outputs.tf similarity index 99% rename from modules/eks/eks-without-spotinst/outputs.tf rename to deprecated/eks/eks-without-spotinst/outputs.tf index 80b367532..25b80ec72 100644 --- a/modules/eks/eks-without-spotinst/outputs.tf +++ b/deprecated/eks/eks-without-spotinst/outputs.tf @@ -67,4 +67,3 @@ output "eks_node_group_statuses" { description = "Status of the EKS Node Group" value = compact([for group in local.node_groups : group.eks_node_group_status]) } - diff --git a/modules/eks/eks-without-spotinst/providers.tf b/deprecated/eks/eks-without-spotinst/providers.tf similarity index 53% rename from modules/eks/eks-without-spotinst/providers.tf rename to deprecated/eks/eks-without-spotinst/providers.tf index 9610c5073..195cc9718 100644 --- a/modules/eks/eks-without-spotinst/providers.tf +++ b/deprecated/eks/eks-without-spotinst/providers.tf @@ -2,8 +2,6 @@ provider "aws" { region = var.region assume_role { - # `terraform import` will not use data from a data source, - # so on import we have to explicitly specify the role # WARNING: # The EKS cluster is owned by the role that 
created it, and that # role is the only role that can access the cluster without an @@ -11,9 +9,8 @@ provider "aws" { # with the provisioned Terraform role and not an SSO role that could # be removed without notice. # - # i.e. Only NON SSO assumed roles such as spacelift assumed roles, can - # plan this terraform module. - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + # This should only be run using the target account's Terraform role. + role_arn = module.iam_roles.terraform_role_arn } } @@ -21,9 +18,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/eks/eks-without-spotinst/remote-state.tf b/deprecated/eks/eks-without-spotinst/remote-state.tf similarity index 92% rename from modules/eks/eks-without-spotinst/remote-state.tf rename to deprecated/eks/eks-without-spotinst/remote-state.tf index 1560c930d..46e18213f 100644 --- a/modules/eks/eks-without-spotinst/remote-state.tf +++ b/deprecated/eks/eks-without-spotinst/remote-state.tf @@ -1,6 +1,6 @@ module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.0" + version = "1.4.1" component = "vpc" @@ -9,7 +9,7 @@ module "vpc" { module "vpc_ingress" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.0" + version = "1.4.1" for_each = toset(var.allow_ingress_from_vpc_stages) @@ -21,7 +21,7 @@ module "vpc_ingress" { module "primary_roles" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.0" + version = "1.4.1" component = "iam-primary-roles" @@ -34,7 +34,7 @@ module "primary_roles" { module "delegated_roles" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.0" + version = "1.4.1" component = "iam-delegated-roles" @@ -48,7 +48,7 @@ module "delegated_roles" { # to it rather than overwrite it (specifically the aws-auth configMap) module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.0" + version = "1.4.1" component = var.eks_component_name diff --git a/modules/eks/eks-without-spotinst/variables.tf b/deprecated/eks/eks-without-spotinst/variables.tf similarity index 100% rename from modules/eks/eks-without-spotinst/variables.tf rename to deprecated/eks/eks-without-spotinst/variables.tf diff --git a/modules/eks/eks-without-spotinst/versions.tf b/deprecated/eks/eks-without-spotinst/versions.tf similarity index 89% rename from modules/eks/eks-without-spotinst/versions.tf rename to deprecated/eks/eks-without-spotinst/versions.tf index 9f0fb337c..8da21ddd5 100644 --- a/modules/eks/eks-without-spotinst/versions.tf +++ b/deprecated/eks/eks-without-spotinst/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 3.0" + version = ">= 3.0" } # spotinst = { # source = "spotinst/spotinst" diff --git a/modules/eks/karpenter-provisioner/README.md b/deprecated/eks/karpenter-provisioner/README.md similarity index 67% rename from modules/eks/karpenter-provisioner/README.md rename to deprecated/eks/karpenter-provisioner/README.md index 3da2d102b..5b79ab02d 100644 --- a/modules/eks/karpenter-provisioner/README.md +++ b/deprecated/eks/karpenter-provisioner/README.md @@ -1,12 +1,22 @@ # Component: `eks/karpenter-provisioner` +> [!WARNING] +> +> #### This component is DEPRECATED +> +> With v1beta1 of Karpenter, the 
`provisioner` component is deprecated. +> Please use the `eks/karpenter-node-group` component instead. +> +> For more details, see the [Karpenter v1beta1 release notes](/modules/eks/karpenter/CHANGELOG.md). + This component deploys [Karpenter provisioners](https://karpenter.sh/v0.18.0/aws/provisioning) on an EKS cluster. ## Usage **Stack Level**: Regional -If provisioning more than one provisioner, it is [best practice](https://aws.github.io/aws-eks-best-practices/karpenter/#create-provisioners-that-are-mutually-exclusive-or-weighted) +If provisioning more than one provisioner, it is +[best practice](https://aws.github.io/aws-eks-best-practices/karpenter/#create-provisioners-that-are-mutually-exclusive-or-weighted) to create provisioners that are mutually exclusive or weighted. ```yaml @@ -42,57 +52,64 @@ components: # and capacity type (such as AWS spot or on-demand). # See https://karpenter.sh/v0.18.0/provisioner/#specrequirements for more details requirements: - - key: "karpenter.sh/capacity-type" - operator: "In" - values: - - "on-demand" - - "spot" - - key: "node.kubernetes.io/instance-type" - operator: "In" - # See https://aws.amazon.com/ec2/instance-explorer/ and https://aws.amazon.com/ec2/instance-types/ - # Values limited by DenyEC2InstancesWithoutEncryptionInTransit service control policy - # See https://github.com/cloudposse/terraform-aws-service-control-policies/blob/master/catalog/ec2-policies.yaml - # Karpenter recommends allowing at least 20 instance types to ensure availability. - values: - - "c5n.2xlarge" - - "c5n.xlarge" - - "c5n.large" - - "c6i.2xlarge" - - "c6i.xlarge" - - "c6i.large" - - "m5n.2xlarge" - - "m5n.xlarge" - - "m5n.large" - - "m5zn.2xlarge" - - "m5zn.xlarge" - - "m5zn.large" - - "m6i.2xlarge" - - "m6i.xlarge" - - "m6i.large" - - "r5n.2xlarge" - - "r5n.xlarge" - - "r5n.large" - - "r6i.2xlarge" - - "r6i.xlarge" - - "r6i.large" - - key: "kubernetes.io/arch" - operator: "In" - values: - - "amd64" + - key: "karpenter.k8s.aws/instance-category" + operator: "In" + values: ["c", "m", "r"] + - key: "karpenter.k8s.aws/instance-generation" + operator: "Gt" + values: ["2"] + - key: "karpenter.sh/capacity-type" + operator: "In" + values: + - "on-demand" + - "spot" + - key: "node.kubernetes.io/instance-type" + operator: "In" + # See https://aws.amazon.com/ec2/instance-explorer/ and https://aws.amazon.com/ec2/instance-types/ + # Values limited by DenyEC2InstancesWithoutEncryptionInTransit service control policy + # See https://github.com/cloudposse/terraform-aws-service-control-policies/blob/master/catalog/ec2-policies.yaml + # Karpenter recommends allowing at least 20 instance types to ensure availability. + values: + - "c5n.2xlarge" + - "c5n.xlarge" + - "c5n.large" + - "c6i.2xlarge" + - "c6i.xlarge" + - "c6i.large" + - "m5n.2xlarge" + - "m5n.xlarge" + - "m5n.large" + - "m5zn.2xlarge" + - "m5zn.xlarge" + - "m5zn.large" + - "m6i.2xlarge" + - "m6i.xlarge" + - "m6i.large" + - "r5n.2xlarge" + - "r5n.xlarge" + - "r5n.large" + - "r6i.2xlarge" + - "r6i.xlarge" + - "r6i.large" + - key: "kubernetes.io/arch" + operator: "In" + values: + - "amd64" # The AMI used by Karpenter provisioner when provisioning nodes. Based on the value set for amiFamily, Karpenter will automatically query for the appropriate EKS optimized AMI via AWS Systems Manager (SSM) # Bottlerocket, AL2, Ubuntu # https://karpenter.sh/v0.18.0/aws/provisioning/#amazon-machine-image-ami-family ami_family: AL2 # Karpenter provisioner block device mappings. 
block_device_mappings: - - deviceName: /dev/xvda - ebs: - volumeSize: 200Gi - volumeType: gp3 - encrypted: true - deleteOnTermination: true + - deviceName: /dev/xvda + ebs: + volumeSize: 200Gi + volumeType: gp3 + encrypted: true + deleteOnTermination: true ``` + ## Requirements @@ -101,23 +118,23 @@ components: | [terraform](#requirement\_terraform) | >= 1.3.0 | | [aws](#requirement\_aws) | >= 4.9.0 | | [helm](#requirement\_helm) | >= 2.0 | -| [kubernetes](#requirement\_kubernetes) | >= 2.14.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.14.0, != 2.21.0 | ## Providers | Name | Version | |------|---------| | [aws](#provider\_aws) | >= 4.9.0 | -| [kubernetes](#provider\_kubernetes) | >= 2.14.0 | +| [kubernetes](#provider\_kubernetes) | >= 2.14.0, != 2.21.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.3.1 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.3.1 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -139,17 +156,16 @@ components: | [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `true` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | | [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | | [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | | [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | -| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | | [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | | [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | | [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | @@ -159,7 +175,7 @@ components: | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | -| [provisioners](#input\_provisioners) | Karpenter provisioners config |
map(object({
# The name of the Karpenter provisioner
name = string
# Whether to place EC2 instances launched by Karpenter into VPC private subnets. Set it to `false` to use public subnets
private_subnets_enabled = bool
# Configures Karpenter to terminate empty nodes after the specified number of seconds. This behavior can be disabled by setting the value to `null` (never scales down if not set)
ttl_seconds_after_empty = number
# Configures Karpenter to terminate nodes when a maximum age is reached. This behavior can be disabled by setting the value to `null` (never expires if not set)
ttl_seconds_until_expired = number
# Karpenter provisioner total CPU limit for all pods running on the EC2 instances launched by Karpenter
total_cpu_limit = string
# Karpenter provisioner total memory limit for all pods running on the EC2 instances launched by Karpenter
total_memory_limit = string
# Set acceptable (In) and unacceptable (Out) Kubernetes and Karpenter values for node provisioning based on Well-Known Labels and cloud-specific settings. These can include instance types, zones, computer architecture, and capacity type (such as AWS spot or on-demand). See https://karpenter.sh/v0.18.0/provisioner/#specrequirements for more details
requirements = list(object({
key = string
operator = string
values = list(string)
}))
# Karpenter provisioner taints configuration. See https://aws.github.io/aws-eks-best-practices/karpenter/#create-provisioners-that-are-mutually-exclusive for more details
taints = optional(list(object({
key = string
effect = string
value = string
})))
startup_taints = optional(list(object({
key = string
effect = string
value = string
})))
# Karpenter provisioner metadata options. See https://karpenter.sh/v0.18.0/aws/provisioning/#metadata-options for more details
metadata_options = optional(map(string), {})
# The AMI used by Karpenter provisioner when provisioning nodes. Based on the value set for amiFamily, Karpenter will automatically query for the appropriate EKS optimized AMI via AWS Systems Manager (SSM)
ami_family = string
# Karpenter provisioner block device mappings. Controls the Elastic Block Storage volumes that Karpenter attaches to provisioned nodes. Karpenter uses default block device mappings for the AMI Family specified. For example, the Bottlerocket AMI Family defaults with two block device mappings. See https://karpenter.sh/v0.18.0/aws/provisioning/#block-device-mappings for more details
block_device_mappings = list(object({
deviceName = string
ebs = optional(object({
volumeSize = string
volumeType = string
deleteOnTermination = optional(bool, true)
encrypted = optional(bool, true)
iops = optional(number)
kmsKeyID = optional(string, "alias/aws/ebs")
snapshotID = optional(string)
throughput = optional(number)
}))
}))
}))
| n/a | yes | +| [provisioners](#input\_provisioners) | Karpenter provisioners config |
map(object({
# The name of the Karpenter provisioner
name = string
# Whether to place EC2 instances launched by Karpenter into VPC private subnets. Set it to `false` to use public subnets
private_subnets_enabled = optional(bool, true)
# Configures Karpenter to terminate empty nodes after the specified number of seconds. This behavior can be disabled by setting the value to `null` (never scales down if not set)
# Conflicts with `consolidation.enabled`, which is usually a better option.
ttl_seconds_after_empty = optional(number, null)
# Configures Karpenter to terminate nodes when a maximum age is reached. This behavior can be disabled by setting the value to `null` (never expires if not set)
ttl_seconds_until_expired = optional(number, null)
# Continuously binpack containers into least possible number of nodes. Mutually exclusive with ttl_seconds_after_empty.
# Ideally `true` by default, but conflicts with `ttl_seconds_after_empty`, which was previously the only option.
consolidation = optional(object({
enabled = bool
}), { enabled = false })
# Karpenter provisioner total CPU limit for all pods running on the EC2 instances launched by Karpenter
total_cpu_limit = string
# Karpenter provisioner total memory limit for all pods running on the EC2 instances launched by Karpenter
total_memory_limit = string
# Set acceptable (In) and unacceptable (Out) Kubernetes and Karpenter values for node provisioning based on Well-Known Labels and cloud-specific settings. These can include instance types, zones, computer architecture, and capacity type (such as AWS spot or on-demand). See https://karpenter.sh/v0.18.0/provisioner/#specrequirements for more details
requirements = list(object({
key = string
operator = string
values = list(string)
}))
# Karpenter provisioner taints configuration. See https://aws.github.io/aws-eks-best-practices/karpenter/#create-provisioners-that-are-mutually-exclusive for more details
taints = optional(list(object({
key = string
effect = string
value = string
})), [])
startup_taints = optional(list(object({
key = string
effect = string
value = string
})), [])
# Karpenter provisioner metadata options. See https://karpenter.sh/v0.18.0/aws/provisioning/#metadata-options for more details
metadata_options = optional(object({
httpEndpoint = optional(string, "enabled"), # valid values: enabled, disabled
httpProtocolIPv6 = optional(string, "disabled"), # valid values: enabled, disabled
httpPutResponseHopLimit = optional(number, 2), # limit of 1 discouraged because it keeps Pods from reaching metadata service
httpTokens = optional(string, "required") # valid values: required, optional
})),
# The AMI used by Karpenter provisioner when provisioning nodes. Based on the value set for amiFamily, Karpenter will automatically query for the appropriate EKS optimized AMI via AWS Systems Manager (SSM)
ami_family = string
# Karpenter provisioner block device mappings. Controls the Elastic Block Storage volumes that Karpenter attaches to provisioned nodes. Karpenter uses default block device mappings for the AMI Family specified. For example, the Bottlerocket AMI Family defaults with two block device mappings. See https://karpenter.sh/v0.18.0/aws/provisioning/#block-device-mappings for more details
block_device_mappings = optional(list(object({
deviceName = string
ebs = optional(object({
volumeSize = string
volumeType = string
deleteOnTermination = optional(bool, true)
encrypted = optional(bool, true)
iops = optional(number)
kmsKeyID = optional(string, "alias/aws/ebs")
snapshotID = optional(string)
throughput = optional(number)
}))
})), [])
}))
| n/a | yes | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | @@ -173,6 +189,7 @@ components: | [providers](#output\_providers) | Deployed Karpenter AWSNodeTemplates | | [provisioners](#output\_provisioners) | Deployed Karpenter provisioners | + ## References diff --git a/modules/eks/eks-without-spotinst/modules/node_group_by_region/context.tf b/deprecated/eks/karpenter-provisioner/context.tf similarity index 100% rename from modules/eks/eks-without-spotinst/modules/node_group_by_region/context.tf rename to deprecated/eks/karpenter-provisioner/context.tf diff --git a/modules/eks/karpenter-provisioner/main.tf b/deprecated/eks/karpenter-provisioner/main.tf similarity index 59% rename from modules/eks/karpenter-provisioner/main.tf rename to deprecated/eks/karpenter-provisioner/main.tf index a4698822c..5285f42aa 100644 --- a/modules/eks/karpenter-provisioner/main.tf +++ b/deprecated/eks/karpenter-provisioner/main.tf @@ -22,31 +22,48 @@ resource "kubernetes_manifest" "provisioner" { metadata = { name = each.value.name } - spec = merge({ - limits = { - resources = { - cpu = each.value.total_cpu_limit - memory = each.value.total_memory_limit + spec = merge( + { + limits = { + resources = { + cpu = each.value.total_cpu_limit + memory = each.value.total_memory_limit + } } - } - providerRef = { - name = each.value.name - } - requirements = each.value.requirements - # Do not include keys with null values, or else Terraform will show a perpetual diff. - # Use `try(length(),0)` to detect both empty lists and nulls. - }, try(length(each.value.taints), 0) == 0 ? {} : { - taints = each.value.taints - }, try(length(each.value.startup_taints), 0) == 0 ? {} : { - startupTaints = each.value.startup_taints - }, each.value.ttl_seconds_after_empty == null ? {} : { - ttlSecondsAfterEmpty = each.value.ttl_seconds_after_empty - }, each.value.ttl_seconds_until_expired == null ? {} : { - ttlSecondsUntilExpired = each.value.ttl_seconds_until_expired - }) + providerRef = { + name = each.value.name + } + requirements = each.value.requirements + consolidation = each.value.consolidation + # Do not include keys with null values, or else Terraform will show a perpetual diff. + # Use `try(length(),0)` to detect both empty lists and nulls. + }, + try(length(each.value.taints), 0) == 0 ? {} : { + taints = each.value.taints + }, + try(length(each.value.startup_taints), 0) == 0 ? {} : { + startupTaints = each.value.startup_taints + }, + each.value.ttl_seconds_after_empty == null ? {} : { + ttlSecondsAfterEmpty = each.value.ttl_seconds_after_empty + }, + each.value.ttl_seconds_until_expired == null ? {} : { + ttlSecondsUntilExpired = each.value.ttl_seconds_until_expired + }, + ) } + # spec.requirements counts as a computed field because defaults may be added by the admission webhook. + computed_fields = ["spec.requirements"] + depends_on = [kubernetes_manifest.provider] + + lifecycle { + precondition { + condition = each.value.consolidation.enabled == false || each.value.ttl_seconds_after_empty == null + error_message = "Consolidation and TTL Seconds After Empty are mutually exclusive." 
+ } + } } locals { @@ -80,8 +97,9 @@ resource "kubernetes_manifest" "provider" { "aws:eks:cluster-name" = local.eks_cluster_id } # https://karpenter.sh/v0.18.0/aws/provisioning/#amazon-machine-image-ami-family - amiFamily = each.value.ami_family - tags = module.this.tags + amiFamily = each.value.ami_family + metadataOptions = each.value.metadata_options + tags = module.this.tags }, try(length(local.provisioner_block_device_mappings[each.key]), 0) == 0 ? {} : { blockDeviceMappings = local.provisioner_block_device_mappings[each.key] }) diff --git a/modules/eks/karpenter-provisioner/outputs.tf b/deprecated/eks/karpenter-provisioner/outputs.tf similarity index 100% rename from modules/eks/karpenter-provisioner/outputs.tf rename to deprecated/eks/karpenter-provisioner/outputs.tf diff --git a/deprecated/eks/karpenter-provisioner/provider-helm.tf b/deprecated/eks/karpenter-provisioner/provider-helm.tf new file mode 100644 index 000000000..91cc7f6d4 --- /dev/null +++ b/deprecated/eks/karpenter-provisioner/provider-helm.tf @@ -0,0 +1,201 @@ +################## +# +# This file is a drop-in to provide a helm provider. +# +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# +# All the following variables are just about configuring the Kubernetes provider +# to be able to modify EKS cluster. The reason there are so many options is +# because at various times, each one of them has had problems, so we give you a choice. +# +# The reason there are so many "enabled" inputs rather than automatically +# detecting whether or not they are enabled based on the value of the input +# is that any logic based on input values requires the values to be known during +# the "plan" phase of Terraform, and often they are not, which causes problems. +# +variable "kubeconfig_file_enabled" { + type = bool + default = false + description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false +} + +variable "kubeconfig_file" { + type = string + default = "" + description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false +} + +variable "kubeconfig_context" { + type = string + default = "" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false +} + +variable "kube_data_auth_enabled" { + type = bool + default = false + description = <<-EOT + If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. + EOT + nullable = false +} + +variable "kube_exec_auth_enabled" { + type = bool + default = true + description = <<-EOT + If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. 
+ EOT + nullable = false +} + +variable "kube_exec_auth_role_arn" { + type = string + default = "" + description = "The role ARN for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_role_arn_enabled" { + type = bool + default = true + description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false +} + +variable "kube_exec_auth_aws_profile" { + type = string + default = "" + description = "The AWS config profile for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_aws_profile_enabled" { + type = bool + default = false + description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false +} + +variable "kubeconfig_exec_auth_api_version" { + type = string + default = "client.authentication.k8s.io/v1beta1" + description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false +} + +variable "helm_manifest_experiment_enabled" { + type = bool + default = false + description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false +} + +locals { + kubeconfig_file_enabled = var.kubeconfig_file_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + + # Eventually we might try to get this from an environment variable + kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version + + exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? [ + "--profile", var.kube_exec_auth_aws_profile + ] : [] + + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) + exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ + "--role-arn", local.kube_exec_auth_role_arn + ] : [] + + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") +} + +data "aws_eks_cluster_auth" "eks" { + count = local.kube_data_auth_enabled ? 1 : 0 + name = local.eks_cluster_id +} + +provider "helm" { + kubernetes { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? 
one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } + } + experiments { + manifest = var.helm_manifest_experiment_enabled && module.this.enabled + } +} + +provider "kubernetes" { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } +} diff --git a/deprecated/eks/karpenter-provisioner/providers.tf b/deprecated/eks/karpenter-provisioner/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/deprecated/eks/karpenter-provisioner/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/eks/karpenter-provisioner/remote-state.tf b/deprecated/eks/karpenter-provisioner/remote-state.tf similarity index 87% rename from modules/eks/karpenter-provisioner/remote-state.tf rename to deprecated/eks/karpenter-provisioner/remote-state.tf index c8c7bd15f..cf8ed5c1c 100644 --- a/modules/eks/karpenter-provisioner/remote-state.tf +++ b/deprecated/eks/karpenter-provisioner/remote-state.tf @@ -1,6 +1,6 @@ module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" + version = "1.5.0" component = var.eks_component_name @@ -9,7 +9,7 @@ module "eks" { module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" + version = "1.5.0" component = "vpc" diff --git a/modules/eks/karpenter-provisioner/variables.tf b/deprecated/eks/karpenter-provisioner/variables.tf similarity index 73% rename from modules/eks/karpenter-provisioner/variables.tf rename to deprecated/eks/karpenter-provisioner/variables.tf index 6745ad8b5..3e31e75aa 100644 --- a/modules/eks/karpenter-provisioner/variables.tf +++ b/deprecated/eks/karpenter-provisioner/variables.tf @@ -14,11 +14,17 @@ variable "provisioners" { # The name of the Karpenter provisioner name = string # Whether to place EC2 instances launched by Karpenter into VPC private subnets. Set it to `false` to use public subnets - private_subnets_enabled = bool + private_subnets_enabled = optional(bool, true) # Configures Karpenter to terminate empty nodes after the specified number of seconds. This behavior can be disabled by setting the value to `null` (never scales down if not set) - ttl_seconds_after_empty = number + # Conflicts with `consolidation.enabled`, which is usually a better option. + ttl_seconds_after_empty = optional(number, null) # Configures Karpenter to terminate nodes when a maximum age is reached. This behavior can be disabled by setting the value to `null` (never expires if not set) - ttl_seconds_until_expired = number + ttl_seconds_until_expired = optional(number, null) + # Continuously binpack containers into least possible number of nodes. Mutually exclusive with ttl_seconds_after_empty. + # Ideally `true` by default, but conflicts with `ttl_seconds_after_empty`, which was previously the only option. + consolidation = optional(object({ + enabled = bool + }), { enabled = false }) # Karpenter provisioner total CPU limit for all pods running on the EC2 instances launched by Karpenter total_cpu_limit = string # Karpenter provisioner total memory limit for all pods running on the EC2 instances launched by Karpenter @@ -34,18 +40,23 @@ variable "provisioners" { key = string effect = string value = string - }))) + })), []) startup_taints = optional(list(object({ key = string effect = string value = string - }))) + })), []) # Karpenter provisioner metadata options. 
See https://karpenter.sh/v0.18.0/aws/provisioning/#metadata-options for more details - metadata_options = optional(map(string), {}) + metadata_options = optional(object({ + httpEndpoint = optional(string, "enabled"), # valid values: enabled, disabled + httpProtocolIPv6 = optional(string, "disabled"), # valid values: enabled, disabled + httpPutResponseHopLimit = optional(number, 2), # limit of 1 discouraged because it keeps Pods from reaching metadata service + httpTokens = optional(string, "required") # valid values: required, optional + })), # The AMI used by Karpenter provisioner when provisioning nodes. Based on the value set for amiFamily, Karpenter will automatically query for the appropriate EKS optimized AMI via AWS Systems Manager (SSM) ami_family = string # Karpenter provisioner block device mappings. Controls the Elastic Block Storage volumes that Karpenter attaches to provisioned nodes. Karpenter uses default block device mappings for the AMI Family specified. For example, the Bottlerocket AMI Family defaults with two block device mappings. See https://karpenter.sh/v0.18.0/aws/provisioning/#block-device-mappings for more details - block_device_mappings = list(object({ + block_device_mappings = optional(list(object({ deviceName = string ebs = optional(object({ volumeSize = string @@ -57,7 +68,7 @@ variable "provisioners" { snapshotID = optional(string) throughput = optional(number) })) - })) + })), []) })) description = "Karpenter provisioners config" } diff --git a/deprecated/eks/karpenter-provisioner/versions.tf b/deprecated/eks/karpenter-provisioner/versions.tf new file mode 100644 index 000000000..9f0f54df7 --- /dev/null +++ b/deprecated/eks/karpenter-provisioner/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.3.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.9.0" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.14.0, != 2.21.0" + } + } +} diff --git a/deprecated/eks/karpenter/CHANGELOG.md b/deprecated/eks/karpenter/CHANGELOG.md new file mode 100644 index 000000000..f3ff1cc20 --- /dev/null +++ b/deprecated/eks/karpenter/CHANGELOG.md @@ -0,0 +1,85 @@ +## Version 1.348.0 + +Components PR [#868](https://github.com/cloudposse/terraform-aws-components/pull/868) + +The `karpenter-crd` helm chart can now be installed alongside the `karpenter` helm chart to automatically manage the +lifecycle of Karpenter CRDs. However since this chart must be installed before the `karpenter` helm chart, the +Kubernetes namespace must be available before either chart is deployed. Furthermore, this namespace should persist +whether or not the `karpenter-crd` chart is deployed, so it should not be installed with that given `helm-release` +resource. Therefore, we've moved namespace creation to a separate resource that runs before both charts. Terraform will +handle that namespace state migration with the `moved` block. + +There are several scenarios that may or may not require additional steps. Please review the following scenarios and +follow the steps for your given requirements. + +### Upgrading an existing `eks/karpenter` deployment without changes + +If you currently have `eks/karpenter` deployed to an EKS cluster and have upgraded to this version of the component, no +changes are required. `var.crd_chart_enabled` will default to `false`. 
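In this scenario Terraform simply migrates the namespace state using the `moved` block mentioned above. For reference, that migration is expressed roughly as follows (a sketch mirroring the block that ships in this component's `main.tf`):

```hcl
# Tell Terraform that the namespace resource moved out of the karpenter helm-release module,
# so existing state is adopted in place rather than the namespace being destroyed and recreated.
moved {
  from = module.karpenter.kubernetes_namespace.default[0]
  to   = kubernetes_namespace.default[0]
}
```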
+ +### Upgrading an existing `eks/karpenter` deployment and deploying the `karpenter-crd` chart + +If you currently have `eks/karpenter` deployed to an EKS cluster, have upgraded to this version of the component, do not +currently have the `karpenter-crd` chart installed, and want to now deploy the `karpenter-crd` helm chart, a few +additional steps are required! + +First, set `var.crd_chart_enabled` to `true`. + +Next, update the installed Karpenter CRDs in order for Helm to automatically take over their management when the +`karpenter-crd` chart is deployed. We have included a script to run that upgrade. Run the `./karpenter-crd-upgrade` +script or run the following commands on the given cluster before deploying the chart. Please note that this script or +commands will only need to be run on first use of the CRD chart. + +Before running the script, ensure that the `kubectl` context is set to the cluster where the `karpenter` helm chart is +deployed. In Geodesic, you can usually do this with the `set-cluster` command, though your configuration may vary. + +```bash +set-cluster -- terraform +``` + +Then run the script or commands: + +```bash +kubectl label crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh app.kubernetes.io/managed-by=Helm --overwrite +kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh meta.helm.sh/release-name=karpenter-crd --overwrite +kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh meta.helm.sh/release-namespace=karpenter --overwrite +``` + +:::info + +Previously the `karpenter-crd-upgrade` script included deploying the `karpenter-crd` chart. Now that this chart is moved +to Terraform, that helm deployment is no longer necessary. + +For reference, the `karpenter-crd` chart can be installed with helm with the following: + +```bash +helm upgrade --install karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version "$VERSION" --namespace karpenter +``` + +::: + +Now that the CRDs are upgraded, the component is ready to be applied. Apply the `eks/karpenter` component and then apply +`eks/karpenter-provisioner`. + +#### Note for upgrading Karpenter from before v0.27.3 to v0.27.3 or later + +If you are upgrading Karpenter from before v0.27.3 to v0.27.3 or later, you may need to run the following command to +remove an obsolete webhook: + +```bash +kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh +``` + +See [the Karpenter upgrade guide](https://karpenter.sh/v0.32/upgrading/upgrade-guide/#upgrading-to-v0273) for more +details. + +### Upgrading an existing `eks/karpenter` deployment where the `karpenter-crd` chart is already deployed + +If you currently have `eks/karpenter` deployed to an EKS cluster, have upgraded to this version of the component, and +already have the `karpenter-crd` chart installed, simply set `var.crd_chart_enabled` to `true` and redeploy Terraform to +have Terraform manage the helm release for `karpenter-crd`. + +### Net new deployments + +If you are initially deploying `eks/karpenter`, no changes are required, but we recommend installing the CRD chart. Set +`var.crd_chart_enabled` to `true` and continue with deployment. diff --git a/deprecated/eks/karpenter/README.md b/deprecated/eks/karpenter/README.md new file mode 100644 index 000000000..1ad01d35b --- /dev/null +++ b/deprecated/eks/karpenter/README.md @@ -0,0 +1,467 @@ +# Component: `eks/karpenter` + +This component provisions [Karpenter](https://karpenter.sh) on an EKS cluster. 
It requires at least version 0.19.0 of +Karpenter, though you are encouraged to use the latest version. + +## Usage + +**Stack Level**: Regional + +These instructions assume you are provisioning 2 EKS clusters in the same account and region, named "blue" and "green", +and alternating between them. If you are only using a single cluster, you can ignore the "blue" and "green" references +and remove the `metadata` block from the `karpenter` module. + +```yaml +components: + terraform: + # Base component of all `karpenter` components + eks/karpenter: + metadata: + type: abstract + vars: + enabled: true + eks_component_name: "eks/cluster" + name: "karpenter" + # https://github.com/aws/karpenter/tree/main/charts/karpenter + chart_repository: "oci://public.ecr.aws/karpenter" + chart: "karpenter" + chart_version: "v0.31.0" + create_namespace: true + kubernetes_namespace: "karpenter" + resources: + limits: + cpu: "300m" + memory: "1Gi" + requests: + cpu: "100m" + memory: "512Mi" + cleanup_on_fail: true + atomic: true + wait: true + rbac_enabled: true + # "karpenter-crd" can be installed as an independent helm chart to manage the lifecycle of Karpenter CRDs + crd_chart_enabled: true + crd_chart: "karpenter-crd" + # Set `legacy_create_karpenter_instance_profile` to `false` to allow the `eks/cluster` component + # to manage the instance profile for the nodes launched by Karpenter (recommended for all new clusters). + legacy_create_karpenter_instance_profile: false + # Enable interruption handling to deploy a SQS queue and a set of Event Bridge rules to handle interruption with Karpenter. + interruption_handler_enabled: true + + # Provision `karpenter` component on the blue EKS cluster + eks/karpenter-blue: + metadata: + component: eks/karpenter + inherits: + - eks/karpenter + vars: + eks_component_name: eks/cluster-blue +``` + +## Provision Karpenter on EKS cluster + +Here we describe how to provision Karpenter on an EKS cluster. We will be using the `plat-ue2-dev` stack as an example. + +### Provision Service-Linked Roles for EC2 Spot and EC2 Spot Fleet + +**Note:** If you want to use EC2 Spot for the instances launched by Karpenter, you may need to provision the following +Service-Linked Role for EC2 Spot: + +- Service-Linked Role for EC2 Spot + +This is only necessary if this is the first time you're using EC2 Spot in the account. Since this is a one-time +operation, we recommend you do this manually via the AWS CLI: + +```bash +aws --profile --gbl--admin iam create-service-linked-role --aws-service-name spot.amazonaws.com +``` + +Note that if the Service-Linked Roles already exist in the AWS account (if you used EC2 Spot or Spot Fleet before), and +you try to provision them again, you will see the following errors: + +```text +An error occurred (InvalidInput) when calling the CreateServiceLinkedRole operation: +Service role name AWSServiceRoleForEC2Spot has been taken in this account, please try a different suffix +``` + +For more details, see: + +- https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html +- https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html + +The process of provisioning Karpenter on an EKS cluster consists of 3 steps. + +### 1. 
Provision EKS Fargate Profile for Karpenter and IAM Role for Nodes Launched by Karpenter + +EKS Fargate Profile for Karpenter and IAM Role for Nodes launched by Karpenter are provisioned by the `eks/cluster` +component: + +```yaml +components: + terraform: + eks/cluster-blue: + metadata: + component: eks/cluster + inherits: + - eks/cluster + vars: + attributes: + - blue + eks_component_name: eks/cluster-blue + node_groups: + main: + instance_types: + - t3.medium + max_group_size: 3 + min_group_size: 1 + fargate_profiles: + karpenter: + kubernetes_namespace: karpenter + kubernetes_labels: null + karpenter_iam_role_enabled: true +``` + +**Notes**: + +- Fargate Profile role ARNs need to be added to the `aws-auth` ConfigMap to allow the Fargate Profile nodes to join the + EKS cluster (this is done by EKS) +- Karpenter IAM role ARN needs to be added to the `aws-auth` ConfigMap to allow the nodes launched by Karpenter to join + the EKS cluster (this is done by the `eks/cluster` component) + +We use EKS Fargate Profile for Karpenter because It is recommended to run Karpenter on an EKS Fargate Profile. + +```text +Karpenter is installed using a Helm chart. The Helm chart installs the Karpenter controller and +a webhook pod as a Deployment that needs to run before the controller can be used for scaling your cluster. +We recommend a minimum of one small node group with at least one worker node. + +As an alternative, you can run these pods on EKS Fargate by creating a Fargate profile for the +karpenter namespace. Doing so will cause all pods deployed into this namespace to run on EKS Fargate. +Do not run Karpenter on a node that is managed by Karpenter. +``` + +See +[Run Karpenter Controller on EKS Fargate](https://aws.github.io/aws-eks-best-practices/karpenter/#run-the-karpenter-controller-on-eks-fargate-or-on-a-worker-node-that-belongs-to-a-node-group) +for more details. + +We provision IAM Role for Nodes launched by Karpenter because they must run with an Instance Profile that grants +permissions necessary to run containers and configure networking. + +We define the IAM role for the Instance Profile in `components/terraform/eks/cluster/karpenter.tf`. + +Note that we provision the EC2 Instance Profile for the Karpenter IAM role in the `components/terraform/eks/karpenter` +component (see the next step). + +Run the following commands to provision the EKS Fargate Profile for Karpenter and the IAM role for instances launched by +Karpenter on the blue EKS cluster and add the role ARNs to the `aws-auth` ConfigMap: + +```bash +atmos terraform plan eks/cluster-blue -s plat-ue2-dev +atmos terraform apply eks/cluster-blue -s plat-ue2-dev +``` + +For more details, refer to: + +- https://karpenter.sh/v0.18.0/getting-started/getting-started-with-terraform +- https://karpenter.sh/v0.18.0/getting-started/getting-started-with-eksctl + +### 2. 
Provision `karpenter` component + +In this step, we provision the `components/terraform/eks/karpenter` component, which deploys the following resources: + +- EC2 Instance Profile for the nodes launched by Karpenter (note that the IAM role for the Instance Profile is + provisioned in the previous step in the `eks/cluster` component) +- Karpenter Kubernetes controller using the Karpenter Helm Chart and the `helm_release` Terraform resource +- EKS IAM role for Kubernetes Service Account for the Karpenter controller (with all the required permissions) + +Run the following commands to provision the Karpenter component on the blue EKS cluster: + +```bash +atmos terraform plan eks/karpenter-blue -s plat-ue2-dev +atmos terraform apply eks/karpenter-blue -s plat-ue2-dev +``` + +Note that the stack config for the blue Karpenter component is defined in `stacks/catalog/eks/clusters/blue.yaml`. + +```yaml +eks/karpenter-blue: + metadata: + component: eks/karpenter + inherits: + - eks/karpenter + vars: + eks_component_name: eks/cluster-blue +``` + +### 3. Provision `karpenter-provisioner` component + +In this step, we provision the `components/terraform/eks/karpenter-provisioner` component, which deploys Karpenter +[Provisioners](https://karpenter.sh/v0.18.0/aws/provisioning) using the `kubernetes_manifest` resource. + +**NOTE:** We deploy the provisioners in a separate step as a separate component since it uses `kind: Provisioner` CRD +which itself is created by the `karpenter` component in the previous step. + +Run the following commands to deploy the Karpenter provisioners on the blue EKS cluster: + +```bash +atmos terraform plan eks/karpenter-provisioner-blue -s plat-ue2-dev +atmos terraform apply eks/karpenter-provisioner-blue -s plat-ue2-dev +``` + +Note that the stack config for the blue Karpenter provisioner component is defined in +`stacks/catalog/eks/clusters/blue.yaml`. + +```yaml +eks/karpenter-provisioner-blue: + metadata: + component: eks/karpenter-provisioner + inherits: + - eks/karpenter-provisioner + vars: + attributes: + - blue + eks_component_name: eks/cluster-blue +``` + +You can override the default values from the `eks/karpenter-provisioner` base component. + +For your cluster, you will need to review the following configurations for the Karpenter provisioners and update it +according to your requirements: + +- [requirements](https://karpenter.sh/v0.18.0/provisioner/#specrequirements): + + ```yaml + requirements: + - key: "karpenter.sh/capacity-type" + operator: "In" + values: + - "on-demand" + - "spot" + - key: "node.kubernetes.io/instance-type" + operator: "In" + values: + - "m5.xlarge" + - "m5.large" + - "m5.medium" + - "c5.xlarge" + - "c5.large" + - "c5.medium" + - key: "kubernetes.io/arch" + operator: "In" + values: + - "amd64" + ``` + +- `taints`, `startup_taints`, `ami_family` + +- Resource limits/requests for the Karpenter controller itself: + + ```yaml + resources: + limits: + cpu: "300m" + memory: "1Gi" + requests: + cpu: "100m" + memory: "512Mi" + ``` + +- Total CPU and memory limits for all pods running on the EC2 instances launched by Karpenter: + + ```yaml + total_cpu_limit: "1k" + total_memory_limit: "1000Gi" + ``` + +- Config to terminate empty nodes after the specified number of seconds. This behavior can be disabled by setting the + value to `null` (never scales down if not set): + + ```yaml + ttl_seconds_after_empty: 30 + ``` + +- Config to terminate nodes when a maximum age is reached. 
This behavior can be disabled by setting the value to `null` + (never expires if not set): + + ```yaml + ttl_seconds_until_expired: 2592000 + ``` + +## Node Interruption + +Karpenter also supports listening for and responding to Node Interruption events. If interruption handling is enabled, +Karpenter will watch for upcoming involuntary interruption events that would cause disruption to your workloads. These +interruption events include: + +- Spot Interruption Warnings +- Scheduled Change Health Events (Maintenance Events) +- Instance Terminating Events +- Instance Stopping Events + +:::info + +The Node Interruption Handler is not the same as the Node Termination Handler. The latter is always enabled and cleanly +shuts down the node in 2 minutes in response to a Node Termination event. The former gets advance notice that a node +will soon be terminated, so it can have 5-10 minutes to shut down a node. + +::: + +For more details, see refer to the [Karpenter docs](https://karpenter.sh/v0.32/concepts/disruption/#interruption) and +[FAQ](https://karpenter.sh/v0.32/faq/#interruption-handling) + +To enable Node Interruption handling, set `var.interruption_handler_enabled` to `true`. This will create an SQS queue +and a set of Event Bridge rules to deliver interruption events to Karpenter. + +## Custom Resource Definition (CRD) Management + +Karpenter ships with a few Custom Resource Definitions (CRDs). In earlier versions of this component, when installing a +new version of the `karpenter` helm chart, CRDs were not be upgraded at the same time, requiring manual steps to upgrade +CRDs after deploying the latest chart. However Karpenter now supports an additional, independent helm chart for CRD +management. This helm chart, `karpenter-crd`, can be installed alongside the `karpenter` helm chart to automatically +manage the lifecycle of these CRDs. + +To deploy the `karpenter-crd` helm chart, set `var.crd_chart_enabled` to `true`. (Installing the `karpenter-crd` chart +is recommended. `var.crd_chart_enabled` defaults to `false` to preserve backward compatibility with older versions of +this component.) 
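As a quick reference, both node interruption handling and CRD chart management are toggled with component variables. In an Atmos stack configuration this looks roughly like the following (a minimal sketch; the variable names match the Usage example above, and the stack layout is illustrative):

```yaml
components:
  terraform:
    eks/karpenter:
      vars:
        # Deploy the SQS queue and EventBridge rules for Karpenter interruption handling
        interruption_handler_enabled: true
        # Let this component manage Karpenter CRDs via the independent karpenter-crd chart
        crd_chart_enabled: true
        crd_chart: "karpenter-crd"
```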
+ +## Troubleshooting + +For Karpenter issues, checkout the [Karpenter Troubleshooting Guide](https://karpenter.sh/docs/troubleshooting/) + +### References + +For more details, refer to: + +- https://karpenter.sh/v0.28.0/provisioner/#specrequirements +- https://karpenter.sh/v0.28.0/aws/provisioning +- https://aws.github.io/aws-eks-best-practices/karpenter/#creating-provisioners +- https://aws.github.io/aws-eks-best-practices/karpenter +- https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | +| [helm](#requirement\_helm) | >= 2.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.7.1, != 2.21.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.9.0 | +| [kubernetes](#provider\_kubernetes) | >= 2.7.1, != 2.21.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [karpenter](#module\_karpenter) | cloudposse/helm-release/aws | 0.10.1 | +| [karpenter\_crd](#module\_karpenter\_crd) | cloudposse/helm-release/aws | 0.10.1 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_cloudwatch_event_rule.interruption_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource | +| [aws_cloudwatch_event_target.interruption_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource | +| [aws_iam_instance_profile.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource | +| [aws_sqs_queue.interruption_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue) | resource | +| [aws_sqs_queue_policy.interruption_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue_policy) | resource | +| [kubernetes_namespace.default](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [aws_iam_policy_document.interruption_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [atomic](#input\_atomic) | If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used | `bool` | `true` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [chart](#input\_chart) | Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended | `string` | n/a | yes | +| [chart\_description](#input\_chart\_description) | Set release description attribute (visible in the history) | `string` | `null` | no | +| [chart\_repository](#input\_chart\_repository) | Repository URL where to locate the requested chart | `string` | n/a | yes | +| [chart\_values](#input\_chart\_values) | Additional values to yamlencode as `helm_release` values | `any` | `{}` | no | +| [chart\_version](#input\_chart\_version) | Specify the exact chart version to install. If this is not specified, the latest version is installed | `string` | `null` | no | +| [cleanup\_on\_fail](#input\_cleanup\_on\_fail) | Allow deletion of new resources created in this upgrade when upgrade fails | `bool` | `true` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [crd\_chart](#input\_crd\_chart) | The name of the Karpenter CRD chart to be installed, if `var.crd_chart_enabled` is set to `true`. | `string` | `"karpenter-crd"` | no | +| [crd\_chart\_enabled](#input\_crd\_chart\_enabled) | `karpenter-crd` can be installed as an independent helm chart to manage the lifecycle of Karpenter CRDs. Set to `true` to install this CRD helm chart before the primary karpenter chart. | `bool` | `false` | no | +| [create\_namespace](#input\_create\_namespace) | Create the namespace if it does not yet exist. Defaults to `false` | `bool` | `null` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [interruption\_handler\_enabled](#input\_interruption\_handler\_enabled) | If `true`, deploy a SQS queue and Event Bridge rules to enable interruption handling by Karpenter.

https://karpenter.sh/v0.27.5/concepts/deprovisioning/#interruption | `bool` | `false` | no | +| [interruption\_queue\_message\_retention](#input\_interruption\_queue\_message\_retention) | The message retention in seconds for the interruption handler SQS queue. | `number` | `300` | no | +| [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | +| [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | +| [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | +| [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | +| [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | +| [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | +| [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | +| [kubernetes\_namespace](#input\_kubernetes\_namespace) | The namespace to install the release into | `string` | n/a | yes | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [legacy\_create\_karpenter\_instance\_profile](#input\_legacy\_create\_karpenter\_instance\_profile) | When `true` (the default), this component creates an IAM Instance Profile
for nodes launched by Karpenter, to preserve the legacy behavior.
Set to `false` to disable creation of the IAM Instance Profile, which
avoids conflict with having `eks/cluster` create it.
Use in conjunction with `eks/cluster` component `legacy_do_not_create_karpenter_instance_profile`,
which see for further details. | `bool` | `true` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [rbac\_enabled](#input\_rbac\_enabled) | Enable/disable RBAC | `bool` | `true` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [resources](#input\_resources) | The CPU and memory of the deployment's limits and requests |
object({
limits = object({
cpu = string
memory = string
})
requests = object({
cpu = string
memory = string
})
})
| n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [timeout](#input\_timeout) | Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds | `number` | `null` | no | +| [wait](#input\_wait) | Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true` | `bool` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [instance\_profile](#output\_instance\_profile) | Provisioned EC2 Instance Profile for nodes launched by Karpenter | +| [metadata](#output\_metadata) | Block status of the deployed release | + + + +## References + +- https://karpenter.sh +- https://aws.github.io/aws-eks-best-practices/karpenter +- https://karpenter.sh/v0.18.0/getting-started/getting-started-with-terraform +- https://aws.amazon.com/blogs/aws/introducing-karpenter-an-open-source-high-performance-kubernetes-cluster-autoscaler +- https://github.com/aws/karpenter +- https://www.eksworkshop.com/beginner/085_scaling_karpenter +- https://ec2spotworkshops.com/karpenter.html +- https://www.eksworkshop.com/beginner/085_scaling_karpenter/install_karpenter +- https://karpenter.sh/v0.18.0/development-guide +- https://karpenter.sh/v0.18.0/aws/provisioning +- https://docs.aws.amazon.com/eks/latest/userguide/pod-execution-role.html +- https://aws.amazon.com/premiumsupport/knowledge-center/fargate-troubleshoot-profile-creation +- https://learn.hashicorp.com/tutorials/terraform/kubernetes-crd-faas +- https://github.com/hashicorp/terraform-provider-kubernetes/issues/1545 +- https://issuemode.com/issues/hashicorp/terraform-provider-kubernetes-alpha/4840198 +- https://bytemeta.vip/repo/hashicorp/terraform-provider-kubernetes/issues/1442 +- https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html + +[](https://cpco.io/component) diff --git a/modules/eks/karpenter-provisioner/context.tf b/deprecated/eks/karpenter/context.tf similarity index 100% rename from modules/eks/karpenter-provisioner/context.tf rename to deprecated/eks/karpenter/context.tf diff --git a/deprecated/eks/karpenter/interruption_handler.tf b/deprecated/eks/karpenter/interruption_handler.tf new file mode 100644 index 000000000..558ee7de1 --- /dev/null +++ b/deprecated/eks/karpenter/interruption_handler.tf @@ -0,0 +1,99 @@ +locals { + interruption_handler_enabled = local.enabled && var.interruption_handler_enabled + interruption_handler_queue_name = module.this.id + + dns_suffix = join("", data.aws_partition.current[*].dns_suffix) + + events = { + health_event = { + name = "HealthEvent" + description = "Karpenter interrupt - AWS health event" + event_pattern = { + source = ["aws.health"] + detail-type = ["AWS Health Event"] + } + } + spot_interupt = { + name = "SpotInterrupt" + description = "Karpenter interrupt - EC2 spot instance interruption warning" + event_pattern = { + source = ["aws.ec2"] + detail-type = ["EC2 Spot Instance Interruption Warning"] + } + } + instance_rebalance = { + name = "InstanceRebalance" + description = "Karpenter interrupt - EC2 instance rebalance recommendation" + event_pattern = { + source = ["aws.ec2"] + detail-type = ["EC2 Instance Rebalance Recommendation"] + } + } + instance_state_change = { + name = 
"InstanceStateChange" + description = "Karpenter interrupt - EC2 instance state-change notification" + event_pattern = { + source = ["aws.ec2"] + detail-type = ["EC2 Instance State-change Notification"] + } + } + } +} + +data "aws_partition" "current" { + count = local.interruption_handler_enabled ? 1 : 0 +} + +resource "aws_sqs_queue" "interruption_handler" { + count = local.interruption_handler_enabled ? 1 : 0 + + name = local.interruption_handler_queue_name + message_retention_seconds = var.interruption_queue_message_retention + sqs_managed_sse_enabled = true + + tags = module.this.tags +} + +data "aws_iam_policy_document" "interruption_handler" { + count = local.interruption_handler_enabled ? 1 : 0 + + statement { + sid = "SqsWrite" + actions = ["sqs:SendMessage"] + resources = [aws_sqs_queue.interruption_handler[0].arn] + + principals { + type = "Service" + identifiers = [ + "events.${local.dns_suffix}", + "sqs.${local.dns_suffix}", + ] + } + + } +} + +resource "aws_sqs_queue_policy" "interruption_handler" { + count = local.interruption_handler_enabled ? 1 : 0 + + queue_url = aws_sqs_queue.interruption_handler[0].url + policy = data.aws_iam_policy_document.interruption_handler[0].json +} + +resource "aws_cloudwatch_event_rule" "interruption_handler" { + for_each = { for k, v in local.events : k => v if local.interruption_handler_enabled } + + name = "${module.this.id}-${each.value.name}" + description = each.value.description + event_pattern = jsonencode(each.value.event_pattern) + + tags = module.this.tags +} + +resource "aws_cloudwatch_event_target" "interruption_handler" { + for_each = { for k, v in local.events : k => v if local.interruption_handler_enabled } + + rule = aws_cloudwatch_event_rule.interruption_handler[each.key].name + target_id = "KarpenterInterruptionQueueTarget" + arn = aws_sqs_queue.interruption_handler[0].arn +} diff --git a/deprecated/eks/karpenter/karpenter-crd-upgrade b/deprecated/eks/karpenter/karpenter-crd-upgrade new file mode 100755 index 000000000..e6274deb3 --- /dev/null +++ b/deprecated/eks/karpenter/karpenter-crd-upgrade @@ -0,0 +1,24 @@ +#!/bin/bash + +function usage() { + cat >&2 <<'EOF' +./karpenter-crd-upgrade + +Use this script to prepare a cluster for karpenter-crd helm chart support by upgrading Karpenter CRDs. 
+ +EOF +} + +function upgrade() { + set -x + + kubectl label crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh app.kubernetes.io/managed-by=Helm --overwrite + kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh meta.helm.sh/release-name=karpenter-crd --overwrite + kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh meta.helm.sh/release-namespace=karpenter --overwrite +} + +if (($# == 0)); then + upgrade +else + usage +fi diff --git a/deprecated/eks/karpenter/main.tf b/deprecated/eks/karpenter/main.tf new file mode 100644 index 000000000..1ebf263c4 --- /dev/null +++ b/deprecated/eks/karpenter/main.tf @@ -0,0 +1,236 @@ +# https://aws.amazon.com/blogs/aws/introducing-karpenter-an-open-source-high-performance-kubernetes-cluster-autoscaler/ +# https://karpenter.sh/ +# https://karpenter.sh/v0.10.1/getting-started/getting-started-with-terraform/ +# https://karpenter.sh/v0.10.1/getting-started/getting-started-with-eksctl/ +# https://www.eksworkshop.com/beginner/085_scaling_karpenter/ +# https://karpenter.sh/v0.10.1/aws/provisioning/ +# https://www.eksworkshop.com/beginner/085_scaling_karpenter/setup_the_environment/ +# https://ec2spotworkshops.com/karpenter.html +# https://catalog.us-east-1.prod.workshops.aws/workshops/76a5dd80-3249-4101-8726-9be3eeee09b2/en-US/autoscaling/karpenter + +locals { + enabled = module.this.enabled + + eks_cluster_identity_oidc_issuer = try(module.eks.outputs.eks_cluster_identity_oidc_issuer, "") + karpenter_iam_role_name = try(module.eks.outputs.karpenter_iam_role_name, "") + + karpenter_instance_profile_enabled = local.enabled && var.legacy_create_karpenter_instance_profile && length(local.karpenter_iam_role_name) > 0 +} + +resource "aws_iam_instance_profile" "default" { + count = local.karpenter_instance_profile_enabled ? 1 : 0 + + name = local.karpenter_iam_role_name + role = local.karpenter_iam_role_name + tags = module.this.tags +} + +# See CHANGELOG for PR #868: +# https://github.com/cloudposse/terraform-aws-components/pull/868 +# +# Namespace was moved from the karpenter module to an independent resource in order to be +# shared between both the karpenter and karpenter-crd modules. +moved { + from = module.karpenter.kubernetes_namespace.default[0] + to = kubernetes_namespace.default[0] +} + +resource "kubernetes_namespace" "default" { + count = local.enabled && var.create_namespace ? 
1 : 0 + + metadata { + name = var.kubernetes_namespace + annotations = {} + labels = merge(module.this.tags, { name = var.kubernetes_namespace }) + } +} + +# Deploy karpenter-crd helm chart +# "karpenter-crd" can be installed as an independent helm chart to manage the lifecycle of Karpenter CRDs +module "karpenter_crd" { + enabled = local.enabled && var.crd_chart_enabled + + source = "cloudposse/helm-release/aws" + version = "0.10.1" + + name = var.crd_chart + chart = var.crd_chart + repository = var.chart_repository + description = var.chart_description + chart_version = var.chart_version + wait = var.wait + atomic = var.atomic + cleanup_on_fail = var.cleanup_on_fail + timeout = var.timeout + + create_namespace_with_kubernetes = false # Namespace is created with kubernetes_namespace resources to be shared between charts + kubernetes_namespace = join("", kubernetes_namespace.default[*].id) + kubernetes_namespace_labels = merge(module.this.tags, { name = join("", kubernetes_namespace.default[*].id) }) + + eks_cluster_oidc_issuer_url = coalesce(replace(local.eks_cluster_identity_oidc_issuer, "https://", ""), "deleted") + + values = compact([ + # standard k8s object settings + yamlencode({ + fullnameOverride = module.this.name + resources = var.resources + rbac = { + create = var.rbac_enabled + } + }), + ]) + + context = module.this.context + + depends_on = [ + kubernetes_namespace.default + ] +} + +# Deploy Karpenter helm chart +module "karpenter" { + source = "cloudposse/helm-release/aws" + version = "0.10.1" + + chart = var.chart + repository = var.chart_repository + description = var.chart_description + chart_version = var.chart_version + wait = var.wait + atomic = var.atomic + cleanup_on_fail = var.cleanup_on_fail + timeout = var.timeout + + create_namespace_with_kubernetes = false # Namespace is created with kubernetes_namespace resources to be shared between charts + kubernetes_namespace = join("", kubernetes_namespace.default[*].id) + kubernetes_namespace_labels = merge(module.this.tags, { name = join("", kubernetes_namespace.default[*].id) }) + + eks_cluster_oidc_issuer_url = coalesce(replace(local.eks_cluster_identity_oidc_issuer, "https://", ""), "deleted") + + service_account_name = module.this.name + service_account_namespace = join("", kubernetes_namespace.default[*].id) + + iam_role_enabled = true + + # https://karpenter.sh/v0.6.1/getting-started/cloudformation.yaml + # https://karpenter.sh/v0.10.1/getting-started/getting-started-with-terraform + # https://github.com/aws/karpenter/issues/2649 + # Apparently the source of truth for the best IAM policy is the `data.aws_iam_policy_document.karpenter_controller` in + # https://github.com/terraform-aws-modules/terraform-aws-iam/blob/master/modules/iam-role-for-service-accounts-eks/policies.tf + iam_policy = [{ + statements = concat([ + { + sid = "KarpenterController" + effect = "Allow" + resources = ["*"] + + actions = [ + # https://github.com/terraform-aws-modules/terraform-aws-iam/blob/99c69ad54d985f67acf211885aa214a3a6cc931c/modules/iam-role-for-service-accounts-eks/policies.tf#L511-L581 + # The reference policy is broken up into multiple statements with different resource restrictions based on tags. + # This list has breaks where statements are separated in the reference policy for easier comparison and maintenance. 
+ "ec2:CreateLaunchTemplate", + "ec2:CreateFleet", + "ec2:CreateTags", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeImages", + "ec2:DescribeInstances", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstanceTypeOfferings", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeSpotPriceHistory", + "pricing:GetProducts", + + "ec2:TerminateInstances", + "ec2:DeleteLaunchTemplate", + + "ec2:RunInstances", + + "iam:PassRole", + ] + }, + { + sid = "KarpenterControllerSSM" + effect = "Allow" + # Allow Karpenter to read AMI IDs from SSM + actions = ["ssm:GetParameter"] + resources = ["arn:aws:ssm:*:*:parameter/aws/service/*"] + }, + { + sid = "KarpenterControllerClusterAccess" + effect = "Allow" + actions = [ + "eks:DescribeCluster" + ] + resources = [ + module.eks.outputs.eks_cluster_arn + ] + } + ], + local.interruption_handler_enabled ? [ + { + sid = "KarpenterInterruptionHandlerAccess" + effect = "Allow" + actions = [ + "sqs:DeleteMessage", + "sqs:GetQueueUrl", + "sqs:GetQueueAttributes", + "sqs:ReceiveMessage", + ] + resources = [ + one(aws_sqs_queue.interruption_handler[*].arn) + ] + } + ] : [] + ) + }] + + + values = compact([ + # standard k8s object settings + yamlencode({ + fullnameOverride = module.this.name + serviceAccount = { + name = module.this.name + } + controller = { + resources = var.resources + } + rbac = { + create = var.rbac_enabled + } + }), + # karpenter-specific values + yamlencode({ + settings = { + # This configuration of settings requires Karpenter chart v0.19.0 or later + aws = { + defaultInstanceProfile = local.karpenter_iam_role_name # instance profile name === role name + clusterName = local.eks_cluster_id + # clusterEndpoint not needed as of v0.25.0 + clusterEndpoint = local.eks_cluster_endpoint + tags = module.this.tags + } + } + }), + yamlencode( + local.interruption_handler_enabled ? { + settings = { + aws = { + interruptionQueueName = local.interruption_handler_queue_name + } + } + } : {}), + # additional values + yamlencode(var.chart_values) + ]) + + context = module.this.context + + depends_on = [ + aws_iam_instance_profile.default, + module.karpenter_crd, + kubernetes_namespace.default + ] +} diff --git a/deprecated/eks/karpenter/outputs.tf b/deprecated/eks/karpenter/outputs.tf new file mode 100644 index 000000000..830bd12aa --- /dev/null +++ b/deprecated/eks/karpenter/outputs.tf @@ -0,0 +1,9 @@ +output "metadata" { + value = module.karpenter.metadata + description = "Block status of the deployed release" +} + +output "instance_profile" { + value = aws_iam_instance_profile.default + description = "Provisioned EC2 Instance Profile for nodes launched by Karpenter" +} diff --git a/deprecated/eks/karpenter/provider-helm.tf b/deprecated/eks/karpenter/provider-helm.tf new file mode 100644 index 000000000..91cc7f6d4 --- /dev/null +++ b/deprecated/eks/karpenter/provider-helm.tf @@ -0,0 +1,201 @@ +################## +# +# This file is a drop-in to provide a helm provider. +# +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# +# All the following variables are just about configuring the Kubernetes provider +# to be able to modify EKS cluster. The reason there are so many options is +# because at various times, each one of them has had problems, so we give you a choice. 
+# +# The reason there are so many "enabled" inputs rather than automatically +# detecting whether or not they are enabled based on the value of the input +# is that any logic based on input values requires the values to be known during +# the "plan" phase of Terraform, and often they are not, which causes problems. +# +variable "kubeconfig_file_enabled" { + type = bool + default = false + description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false +} + +variable "kubeconfig_file" { + type = string + default = "" + description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false +} + +variable "kubeconfig_context" { + type = string + default = "" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false +} + +variable "kube_data_auth_enabled" { + type = bool + default = false + description = <<-EOT + If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. + EOT + nullable = false +} + +variable "kube_exec_auth_enabled" { + type = bool + default = true + description = <<-EOT + If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. + EOT + nullable = false +} + +variable "kube_exec_auth_role_arn" { + type = string + default = "" + description = "The role ARN for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_role_arn_enabled" { + type = bool + default = true + description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false +} + +variable "kube_exec_auth_aws_profile" { + type = string + default = "" + description = "The AWS config profile for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_aws_profile_enabled" { + type = bool + default = false + description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false +} + +variable "kubeconfig_exec_auth_api_version" { + type = string + default = "client.authentication.k8s.io/v1beta1" + description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false +} + +variable "helm_manifest_experiment_enabled" { + type = bool + default = false + description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false +} + +locals { + kubeconfig_file_enabled = var.kubeconfig_file_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? 
format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + + # Eventually we might try to get this from an environment variable + kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version + + exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? [ + "--profile", var.kube_exec_auth_aws_profile + ] : [] + + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) + exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ + "--role-arn", local.kube_exec_auth_role_arn + ] : [] + + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") +} + +data "aws_eks_cluster_auth" "eks" { + count = local.kube_data_auth_enabled ? 1 : 0 + name = local.eks_cluster_id +} + +provider "helm" { + kubernetes { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } + } + experiments { + manifest = var.helm_manifest_experiment_enabled && module.this.enabled + } +} + +provider "kubernetes" { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? 
["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } +} diff --git a/deprecated/eks/karpenter/providers.tf b/deprecated/eks/karpenter/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/deprecated/eks/karpenter/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/deprecated/eks/karpenter/remote-state.tf b/deprecated/eks/karpenter/remote-state.tf new file mode 100644 index 000000000..c1ec8226d --- /dev/null +++ b/deprecated/eks/karpenter/remote-state.tf @@ -0,0 +1,8 @@ +module "eks" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.eks_component_name + + context = module.this.context +} diff --git a/deprecated/eks/karpenter/variables.tf b/deprecated/eks/karpenter/variables.tf new file mode 100644 index 000000000..9b84ba3b4 --- /dev/null +++ b/deprecated/eks/karpenter/variables.tf @@ -0,0 +1,134 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "chart_description" { + type = string + description = "Set release description attribute (visible in the history)" + default = null +} + +variable "chart" { + type = string + description = "Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended" +} + +variable "chart_repository" { + type = string + description = "Repository URL where to locate the requested chart" +} + +variable "chart_version" { + type = string + description = "Specify the exact chart version to install. If this is not specified, the latest version is installed" + default = null +} + +variable "crd_chart_enabled" { + type = bool + description = "`karpenter-crd` can be installed as an independent helm chart to manage the lifecycle of Karpenter CRDs. Set to `true` to install this CRD helm chart before the primary karpenter chart." + default = false +} + +variable "crd_chart" { + type = string + description = "The name of the Karpenter CRD chart to be installed, if `var.crd_chart_enabled` is set to `true`." + default = "karpenter-crd" +} + +variable "resources" { + type = object({ + limits = object({ + cpu = string + memory = string + }) + requests = object({ + cpu = string + memory = string + }) + }) + description = "The CPU and memory of the deployment's limits and requests" +} + +variable "create_namespace" { + type = bool + description = "Create the namespace if it does not yet exist. 
Defaults to `false`" + default = null +} + +variable "kubernetes_namespace" { + type = string + description = "The namespace to install the release into" +} + +variable "timeout" { + type = number + description = "Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds" + default = null +} + +variable "cleanup_on_fail" { + type = bool + description = "Allow deletion of new resources created in this upgrade when upgrade fails" + default = true +} + +variable "atomic" { + type = bool + description = "If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used" + default = true +} + +variable "wait" { + type = bool + description = "Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`" + default = null +} + +variable "chart_values" { + type = any + description = "Additional values to yamlencode as `helm_release` values" + default = {} +} + +variable "rbac_enabled" { + type = bool + description = "Enable/disable RBAC" + default = true +} + +variable "eks_component_name" { + type = string + description = "The name of the eks component" + default = "eks/cluster" +} + +variable "interruption_handler_enabled" { + type = bool + default = false + description = < + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | +| [helm](#requirement\_helm) | >= 2.0 | +| [jq](#requirement\_jq) | >= 0.2.1 | + +## Providers + +| Name | Version | +|------|---------| +| [jq](#provider\_jq) | >= 0.2.1 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [remote](#module\_remote) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [store\_write](#module\_store\_write) | cloudposse/ssm-parameter-store/aws | 0.11.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [jq_query.default](https://registry.terraform.io/providers/massdriver-cloud/jq/latest/docs/data-sources/query) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [platform\_environment](#input\_platform\_environment) | Platform environment | `string` | `"default"` | no | +| [references](#input\_references) | Platform mapping from remote components outputs |
map(object({
component = string
privileged = optional(bool)
tenant = optional(string)
environment = optional(string)
stage = optional(string)
output = string
}))
| `{}` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [ssm\_platform\_path](#input\_ssm\_platform\_path) | Format SSM path to store platform configs | `string` | `"/platform/%s/%s"` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +No outputs. + + + +[](https://cpco.io/component) diff --git a/modules/spacelift-worker-pool/context.tf b/deprecated/eks/platform/context.tf similarity index 100% rename from modules/spacelift-worker-pool/context.tf rename to deprecated/eks/platform/context.tf diff --git a/deprecated/eks/platform/main.tf b/deprecated/eks/platform/main.tf new file mode 100644 index 000000000..962dee498 --- /dev/null +++ b/deprecated/eks/platform/main.tf @@ -0,0 +1,49 @@ +locals { + metadata = { + kube_version = { + component = var.eks_component_name + output = "eks_cluster_version" + } + } +} + +module "store_write" { + source = "cloudposse/ssm-parameter-store/aws" + version = "0.11.0" + + parameter_write = concat( + [for k, v in var.references : + { + name = format("%s/%s", format(var.ssm_platform_path, module.eks.outputs.eks_cluster_id, var.platform_environment), k) + value = local.result[k] + type = "SecureString" + overwrite = true + description = "Platform config for ${var.platform_environment} at ${module.eks.outputs.eks_cluster_id} cluster" + } + ], + [for k, v in local.metadata : + { + name = format("%s/%s", format(var.ssm_platform_path, module.eks.outputs.eks_cluster_id, "_metadata"), k) + value = lookup(module.remote[k].outputs, v.output) + type = "SecureString" + overwrite = true + description = "Platform metadata for ${module.eks.outputs.eks_cluster_id} cluster" + } + ]) + + context = module.this.context +} + +data "jq_query" "default" { + for_each = var.references + data = jsonencode(module.remote[each.key].outputs) + # Query is left to be free form since setting this to something like `.` would + # mean you cannot handle arrays. For example, if you wanted to get the first + # element of an array, you would need to use `[0]` as the query, but having a + # query of `.` would not allow you to do that. It would render as '.[0]' + query = each.value.output +} + +locals { + result = { for k, v in data.jq_query.default : k => jsondecode(v.result) } +} diff --git a/deprecated/eks/platform/outputs.tf b/deprecated/eks/platform/outputs.tf new file mode 100644 index 000000000..e69de29bb diff --git a/deprecated/eks/platform/providers.tf b/deprecated/eks/platform/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/deprecated/eks/platform/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/deprecated/eks/platform/remote-state.tf b/deprecated/eks/platform/remote-state.tf new file mode 100644 index 000000000..9c7a97c11 --- /dev/null +++ b/deprecated/eks/platform/remote-state.tf @@ -0,0 +1,23 @@ +module "eks" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.eks_component_name + + context = module.this.context +} + + +module "remote" { + for_each = merge(var.references, local.metadata) + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = each.value["component"] + privileged = coalesce(try(each.value["privileged"], null), false) + tenant = coalesce(try(each.value["tenant"], null), module.this.context["tenant"], null) + environment = coalesce(try(each.value["environment"], null), module.this.context["environment"], null) + stage = coalesce(try(each.value["stage"], null), module.this.context["stage"], null) + + context = module.this.context +} diff --git a/deprecated/eks/platform/variables.tf b/deprecated/eks/platform/variables.tf new file mode 100644 index 000000000..d905e7f31 --- /dev/null +++ b/deprecated/eks/platform/variables.tf @@ -0,0 +1,35 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "references" { + description = "Platform mapping from remote components outputs" + default = {} + type = map(object({ + component = string + privileged = optional(bool) + tenant = optional(string) + environment = optional(string) + stage = optional(string) + output = string + })) +} + +variable "eks_component_name" { + type = string + description = "The name of the eks component" + default = "eks/cluster" +} + +variable "ssm_platform_path" { + type = string + description = "Format SSM path to store platform configs" + default = "/platform/%s/%s" +} + +variable "platform_environment" { + type = string + description = "Platform environment" + default = "default" +} diff --git a/modules/eks/efs-controller/versions.tf b/deprecated/eks/platform/versions.tf similarity index 64% rename from modules/eks/efs-controller/versions.tf rename to deprecated/eks/platform/versions.tf index 58318d20e..557b20ee3 100644 --- a/modules/eks/efs-controller/versions.tf +++ b/deprecated/eks/platform/versions.tf @@ -4,11 +4,15 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.9.0" } helm = { source = "hashicorp/helm" version = ">= 2.0" } + jq = { + source = "massdriver-cloud/jq" + version = ">= 0.2.1" + } } } diff --git a/deprecated/github-actions-runner/runners/runner/docker-config.json b/deprecated/github-actions-runner/runners/runner/docker-config.json index c267984aa..66da23b90 100644 --- a/deprecated/github-actions-runner/runners/runner/docker-config.json +++ b/deprecated/github-actions-runner/runners/runner/docker-config.json @@ -1,4 +1,4 @@ { "credsStore": "ecr-login", "experimental": "enabled" -} \ No newline at end of file +} diff --git a/deprecated/github-actions-runner/variables.tf b/deprecated/github-actions-runner/variables.tf index d547c9380..a3b02b113 100644 --- a/deprecated/github-actions-runner/variables.tf +++ b/deprecated/github-actions-runner/variables.tf @@ -192,7 +192,7 @@ variable "runner_configurations" { error_message = "Variable runner_configurations can contain only 
one target key of either `repo` or `org` not both." } - # runner_configuration may only conatain map keys "repo", "org", "runner_type", "autoscale_type" + # runner_configuration may only contain map keys "repo", "org", "runner_type", "autoscale_type" validation { condition = alltrue([for r in var.runner_configurations : alltrue([for k in keys(r) : contains(["repo", "org", "runner_type", "autoscale_type"], k)])]) error_message = "Unknown map key, must be one of repo, org, runner_type or autoscale_type." diff --git a/deprecated/github-actions-runner/versions.tf b/deprecated/github-actions-runner/versions.tf index f5a0b0074..594f5ada0 100644 --- a/deprecated/github-actions-runner/versions.tf +++ b/deprecated/github-actions-runner/versions.tf @@ -11,4 +11,4 @@ terraform { version = ">= 2.0" } } -} \ No newline at end of file +} diff --git a/deprecated/gitops/README.md b/deprecated/gitops/README.md new file mode 100644 index 000000000..58a0a9404 --- /dev/null +++ b/deprecated/gitops/README.md @@ -0,0 +1,134 @@ +# Component: `gitops` + +This component is used to deploy GitHub OIDC roles for accessing the `gitops` Team. We use this team to run Terraform +from GitHub Actions. + +Examples: + +- [cloudposse/github-action-terraform-plan-storage](https://github.com/cloudposse/github-action-terraform-plan-storage/blob/main/.github/workflows/build-and-test.yml) + +## Usage + +**Stack Level**: Regional + +Here are some example snippets for how to use this component: + +```yaml +import: + - catalog/s3-bucket/defaults + - catalog/dynamodb/defaults + +components: + terraform: + # S3 Bucket for storing Terraform Plans + gitops/s3-bucket: + metadata: + component: s3-bucket + inherits: + - s3-bucket/defaults + vars: + name: gitops-plan-storage + allow_encrypted_uploads_only: false + + # DynamoDB table used to store metadata for Terraform Plans + gitops/dynamodb: + metadata: + component: dynamodb + inherits: + - dynamodb/defaults + vars: + name: gitops-plan-storage + # These keys (case-sensitive) are required for the cloudposse/github-action-terraform-plan-storage action + hash_key: id + range_key: createdAt + + gitops: + vars: + enabled: true + github_actions_iam_role_enabled: true + github_actions_iam_role_attributes: ["gitops"] + github_actions_allowed_repos: + - "acmeOrg/infra" + s3_bucket_component_name: gitops/s3-bucket + dynamodb_component_name: gitops/dynamodb +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.9.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [dynamodb](#module\_dynamodb) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [gha\_assume\_role](#module\_gha\_assume\_role) | ../account-map/modules/team-assume-role-policy | n/a | +| [gha\_role\_name](#module\_gha\_role\_name) | cloudposse/label/null | 0.25.0 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [s3\_bucket](#module\_s3\_bucket) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_iam_role.github_actions](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| 
[aws_iam_policy_document.github_actions_iam_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [dynamodb\_component\_name](#input\_dynamodb\_component\_name) | The name of the dynamodb component used to store Terraform state | `string` | `"gitops/dynamodb"` | no | +| [dynamodb\_environment\_name](#input\_dynamodb\_environment\_name) | The name of the dynamodb environment used to store Terraform state | `string` | `null` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [github\_actions\_allowed\_repos](#input\_github\_actions\_allowed\_repos) | A list of the GitHub repositories that are allowed to assume this role from GitHub Actions. For example,
["cloudposse/infra-live"]. Can contain "*" as wildcard.
If org part of repo name is omitted, "cloudposse" will be assumed. | `list(string)` | `[]` | no | +| [github\_actions\_iam\_role\_attributes](#input\_github\_actions\_iam\_role\_attributes) | Additional attributes to add to the role name | `list(string)` | `[]` | no | +| [github\_actions\_iam\_role\_enabled](#input\_github\_actions\_iam\_role\_enabled) | Flag to toggle creation of an IAM Role that GitHub Actions can assume to access AWS resources | `bool` | `false` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [s3\_bucket\_component\_name](#input\_s3\_bucket\_component\_name) | The name of the s3\_bucket component used to store Terraform state | `string` | `"gitops/s3-bucket"` | no | +| [s3\_bucket\_environment\_name](#input\_s3\_bucket\_environment\_name) | The name of the s3\_bucket environment used to store Terraform state | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [github\_actions\_iam\_role\_arn](#output\_github\_actions\_iam\_role\_arn) | ARN of IAM role for GitHub Actions | +| [github\_actions\_iam\_role\_name](#output\_github\_actions\_iam\_role\_name) | Name of IAM role for GitHub Actions | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/gitops) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/spacelift/context.tf b/deprecated/gitops/context.tf similarity index 100% rename from modules/spacelift/context.tf rename to deprecated/gitops/context.tf diff --git a/deprecated/gitops/github-actions-iam-policy.tf b/deprecated/gitops/github-actions-iam-policy.tf new file mode 100644 index 000000000..396dd2344 --- /dev/null +++ b/deprecated/gitops/github-actions-iam-policy.tf @@ -0,0 +1,69 @@ +locals { + enabled = module.this.enabled + github_actions_iam_policy = data.aws_iam_policy_document.github_actions_iam_policy.json + + s3_bucket_arn = module.s3_bucket.outputs.bucket_arn + dynamodb_table_arn = module.dynamodb.outputs.table_arn +} + +data "aws_iam_policy_document" "github_actions_iam_policy" { + # Allow access to the Dynamodb table used to store TF Plans + # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_examples_dynamodb_specific-table.html + statement { + sid = "AllowDynamodbAccess" + effect = "Allow" + actions = [ + "dynamodb:List*", + "dynamodb:DescribeReservedCapacity*", + "dynamodb:DescribeLimits", + "dynamodb:DescribeTimeToLive" + ] + resources = [ + "*" + ] + } + statement { + sid = "AllowDynamodbTableAccess" + effect = "Allow" + actions = [ + "dynamodb:BatchGet*", + "dynamodb:DescribeStream", + "dynamodb:DescribeTable", + "dynamodb:Get*", + "dynamodb:Query", + "dynamodb:Scan", + "dynamodb:BatchWrite*", + "dynamodb:CreateTable", + "dynamodb:Delete*", + "dynamodb:Update*", + "dynamodb:PutItem" + ] + resources = [ + local.dynamodb_table_arn, + "${local.dynamodb_table_arn}/*" + ] + } + + # Allow access to the S3 Bucket used to store TF Plans + # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_examples_s3_rw-bucket.html + statement { + sid = "AllowS3Actions" + effect = "Allow" + actions = [ + "s3:ListBucket" + ] + resources = [ + local.s3_bucket_arn + ] + } + statement { + sid = "AllowS3ObjectActions" + effect = "Allow" + actions = [ + "s3:*Object" + ] + resources = [ + "${local.s3_bucket_arn}/*" + ] + } +} diff --git a/deprecated/gitops/github-actions-iam-role.mixin.tf b/deprecated/gitops/github-actions-iam-role.mixin.tf new file mode 100644 index 000000000..de68c6602 --- /dev/null +++ b/deprecated/gitops/github-actions-iam-role.mixin.tf @@ -0,0 +1,72 @@ +# This mixin requires that a local variable named `github_actions_iam_policy` be defined +# and its value to be a JSON IAM Policy Document defining the permissions for the role. +# It also requires that the `github-oidc-provider` has been previously installed and the +# `github-assume-role-policy.mixin.tf` has been added to `account-map/modules/team-assume-role-policy`. 
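+#
+# For example (a sketch of the contract this mixin expects, not additional code in this
+# component), the required local is typically populated from an IAM policy document, as
+# `github-actions-iam-policy.tf` does above:
+#
+#   locals {
+#     github_actions_iam_policy = data.aws_iam_policy_document.github_actions_iam_policy.json
+#   }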
+
+variable "github_actions_iam_role_enabled" {
+  type        = bool
+  description = <<-EOF
+  Flag to toggle creation of an IAM Role that GitHub Actions can assume to access AWS resources
+  EOF
+  default     = false
+}
+
+variable "github_actions_allowed_repos" {
+  type        = list(string)
+  description = <<-EOT
+    A list of the GitHub repositories that are allowed to assume this role from GitHub Actions. For example,
+    ["cloudposse/infra-live"]. Can contain "*" as wildcard.
+    If org part of repo name is omitted, "cloudposse" will be assumed.
+    EOT
+  default     = []
+}
+
+variable "github_actions_iam_role_attributes" {
+  type        = list(string)
+  description = "Additional attributes to add to the role name"
+  default     = []
+}
+
+locals {
+  github_actions_iam_role_enabled = var.github_actions_iam_role_enabled && length(var.github_actions_allowed_repos) > 0
+}
+
+module "gha_role_name" {
+  source  = "cloudposse/label/null"
+  version = "0.25.0"
+
+  enabled    = local.github_actions_iam_role_enabled
+  attributes = compact(concat(var.github_actions_iam_role_attributes, ["gha"]))
+
+  context = module.this.context
+}
+
+module "gha_assume_role" {
+  source = "../account-map/modules/team-assume-role-policy"
+
+  trusted_github_repos = var.github_actions_allowed_repos
+
+  context = module.gha_role_name.context
+}
+
+resource "aws_iam_role" "github_actions" {
+  count              = local.github_actions_iam_role_enabled ? 1 : 0
+  name               = module.gha_role_name.id
+  assume_role_policy = module.gha_assume_role.github_assume_role_policy
+
+  inline_policy {
+    name   = module.gha_role_name.id
+    policy = local.github_actions_iam_policy
+  }
+}
+
+output "github_actions_iam_role_arn" {
+  value       = one(aws_iam_role.github_actions[*].arn)
+  description = "ARN of IAM role for GitHub Actions"
+}
+
+output "github_actions_iam_role_name" {
+  value       = one(aws_iam_role.github_actions[*].name)
+  description = "Name of IAM role for GitHub Actions"
+}
diff --git a/deprecated/gitops/providers.tf b/deprecated/gitops/providers.tf
new file mode 100644
index 000000000..54257fd20
--- /dev/null
+++ b/deprecated/gitops/providers.tf
@@ -0,0 +1,19 @@
+provider "aws" {
+  region = var.region
+
+  # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null.
+  profile = module.iam_roles.terraform_profile_name
+
+  dynamic "assume_role" {
+    # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role.
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = module.iam_roles.terraform_role_arn + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/deprecated/gitops/remote-state.tf b/deprecated/gitops/remote-state.tf new file mode 100644 index 000000000..57954c886 --- /dev/null +++ b/deprecated/gitops/remote-state.tf @@ -0,0 +1,19 @@ +module "s3_bucket" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.s3_bucket_component_name + environment = try(var.s3_bucket_environment_name, module.this.environment) + + context = module.this.context +} + +module "dynamodb" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.dynamodb_component_name + environment = try(var.dynamodb_environment_name, module.this.environment) + + context = module.this.context +} diff --git a/deprecated/gitops/variables.tf b/deprecated/gitops/variables.tf new file mode 100644 index 000000000..64bb993d4 --- /dev/null +++ b/deprecated/gitops/variables.tf @@ -0,0 +1,28 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "s3_bucket_component_name" { + type = string + description = "The name of the s3_bucket component used to store Terraform state" + default = "gitops/s3-bucket" +} + +variable "s3_bucket_environment_name" { + type = string + description = "The name of the s3_bucket environment used to store Terraform state" + default = null +} + +variable "dynamodb_component_name" { + type = string + description = "The name of the dynamodb component used to store Terraform state" + default = "gitops/dynamodb" +} + +variable "dynamodb_environment_name" { + type = string + description = "The name of the dynamodb environment used to store Terraform state" + default = null +} diff --git a/deprecated/gitops/versions.tf b/deprecated/gitops/versions.tf new file mode 100644 index 000000000..cc73ffd35 --- /dev/null +++ b/deprecated/gitops/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.9.0" + } + } +} diff --git a/deprecated/guardduty/common/README.md b/deprecated/guardduty/common/README.md new file mode 100644 index 000000000..3135006e8 --- /dev/null +++ b/deprecated/guardduty/common/README.md @@ -0,0 +1,117 @@ +# Component: `guardduty/common` + +This component is responsible for configuring GuardDuty and it should be used in tandem with the [guardduty/root](../root) component. + +AWS GuardDuty is a managed threat detection service. It is designed to help protect AWS accounts and workloads by continuously monitoring for malicious activities and unauthorized behaviors. GuardDuty analyzes various data sources within your AWS environment, such as AWS CloudTrail logs, VPC Flow Logs, and DNS logs, to detect potential security threats. + +Key features and components of AWS GuardDuty include: + +- Threat detection: GuardDuty employs machine learning algorithms, anomaly detection, and integrated threat intelligence to identify suspicious activities, unauthorized access attempts, and potential security threats. It analyzes event logs and network traffic data to detect patterns, anomalies, and known attack techniques. + +- Threat intelligence: GuardDuty leverages threat intelligence feeds from AWS, trusted partners, and the global community to enhance its detection capabilities. 
It uses this intelligence to identify known malicious IP addresses, domains, and other indicators of compromise. + +- Real-time alerts: When GuardDuty identifies a potential security issue, it generates real-time alerts that can be delivered through AWS CloudWatch Events. These alerts can be integrated with other AWS services like Amazon SNS or AWS Lambda for immediate action or custom response workflows. + +- Multi-account support: GuardDuty can be enabled across multiple AWS accounts, allowing centralized management and monitoring of security across an entire organization's AWS infrastructure. This helps to maintain consistent security policies and practices. + +- Automated remediation: GuardDuty integrates with other AWS services, such as AWS Macie, AWS Security Hub, and AWS Systems Manager, to facilitate automated threat response and remediation actions. This helps to minimize the impact of security incidents and reduces the need for manual intervention. + +- Security findings and reports: GuardDuty provides detailed security findings and reports that include information about detected threats, affected AWS resources, and recommended remediation actions. These findings can be accessed through the AWS Management Console or retrieved via APIs for further analysis and reporting. + +GuardDuty offers a scalable and flexible approach to threat detection within AWS environments, providing organizations with an additional layer of security to proactively identify and respond to potential security risks. + +## Usage + +**Stack Level**: Regional + +The example snippet below shows how to use this component: + +```yaml +components: + terraform: + guardduty/common: + metadata: + component: guardduty/common + vars: + enabled: true + account_map_tenant: core + central_resource_collector_account: core-security + admin_delegated: true +``` + +## Deployment + +This set of steps assumes that `var.central_resource_collector_account = "core-security"`. + +1. Apply `guardduty/common` to `core-security` with `var.admin_delegated = false` +2. Apply `guardduty/root` to `core-root` +3. Apply `guardduty/common` to `core-security` with `var.admin_delegated = true` + +Example: + +``` +# Apply guardduty/common to all regions in core-security +atmos terraform apply guardduty/common-ue2 -s core-ue2-security -var=admin_delegated=false +atmos terraform apply guardduty/common-ue1 -s core-ue1-security -var=admin_delegated=false +atmos terraform apply guardduty/common-uw1 -s core-uw1-security -var=admin_delegated=false +# ... other regions + +# Apply guardduty/root to all regions in core-root +atmos terraform apply guardduty/root-ue2 -s core-ue2-root +atmos terraform apply guardduty/root-ue1 -s core-ue1-root +atmos terraform apply guardduty/root-uw1 -s core-uw1-root +# ... other regions + +# Apply guardduty/common to all regions in core-security but with default values for admin_delegated +atmos terraform apply guardduty/common-ue2 -s core-ue2-security +atmos terraform apply guardduty/common-ue1 -s core-ue1-security +atmos terraform apply guardduty/common-uw1 -s core-uw1-security +# ... other regions +``` + + +## Requirements + +No requirements. 
+ +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | n/a | +| [awsutils](#provider\_awsutils) | n/a | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.2 | +| [guardduty](#module\_guardduty) | cloudposse/guardduty/aws | 0.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | + +## Resources + +| Name | Type | +|------|------| +| [awsutils_guardduty_organization_settings.this](https://registry.terraform.io/providers/hashicorp/awsutils/latest/docs/resources/guardduty_organization_settings) | resource | +| [aws_caller_identity.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | + +## Inputs + +No inputs. + +## Outputs + +| Name | Description | +|------|-------------| +| [guardduty\_detector\_arn](#output\_guardduty\_detector\_arn) | GuardDuty detector ARN | +| [guardduty\_detector\_id](#output\_guardduty\_detector\_id) | GuardDuty detector ID | +| [sns\_topic\_name](#output\_sns\_topic\_name) | SNS topic name | +| [sns\_topic\_subscriptions](#output\_sns\_topic\_subscriptions) | SNS topic subscriptions | + + +## References +* [AWS GuardDuty Documentation](https://aws.amazon.com/guardduty/) +* [Cloud Posse's upstream component](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/guardduty/common/) + +[](https://cpco.io/component) diff --git a/deprecated/guardduty/common/main.tf b/deprecated/guardduty/common/main.tf new file mode 100644 index 000000000..29420b177 --- /dev/null +++ b/deprecated/guardduty/common/main.tf @@ -0,0 +1,35 @@ +locals { + enabled = module.this.enabled + create_sns_topic = local.enabled && var.create_sns_topic + account_map = module.account_map.outputs.full_account_map + central_resource_collector_account = local.account_map[var.central_resource_collector_account] + account_id = one(data.aws_caller_identity.this[*].account_id) + is_global_collector_account = local.account_id == local.central_resource_collector_account + member_account_list = [for a in keys(local.account_map) : (local.account_map[a]) if local.account_map[a] != local.account_id] +} + +module "guardduty" { + count = local.enabled && local.is_global_collector_account ? 1 : 0 + source = "cloudposse/guardduty/aws" + version = "0.5.0" + + finding_publishing_frequency = var.finding_publishing_frequency + create_sns_topic = var.create_sns_topic + findings_notification_arn = var.findings_notification_arn + subscribers = var.subscribers + enable_cloudwatch = var.enable_cloudwatch + cloudwatch_event_rule_pattern_detail_type = var.cloudwatch_event_rule_pattern_detail_type + + context = module.this.context +} + +data "aws_caller_identity" "this" { + count = local.enabled ? 1 : 0 +} + +resource "awsutils_guardduty_organization_settings" "this" { + count = local.enabled && var.admin_delegated && local.is_global_collector_account ? 
1 : 0 + + member_accounts = local.member_account_list + detector_id = module.guardduty[0].guardduty_detector.id +} diff --git a/deprecated/guardduty/common/outputs.tf b/deprecated/guardduty/common/outputs.tf new file mode 100644 index 000000000..0a527a355 --- /dev/null +++ b/deprecated/guardduty/common/outputs.tf @@ -0,0 +1,19 @@ +output "guardduty_detector_arn" { + value = one(module.guardduty[*].guardduty_detector.arn) + description = "GuardDuty detector ARN" +} + +output "guardduty_detector_id" { + value = one(module.guardduty[*].guardduty_detector.id) + description = "GuardDuty detector ID" +} + +output "sns_topic_name" { + description = "SNS topic name" + value = one(module.guardduty[*].sns_topic.name) +} + +output "sns_topic_subscriptions" { + description = "SNS topic subscriptions" + value = one(module.guardduty[*].sns_topic_subscriptions) +} diff --git a/deprecated/guardduty/common/providers.tf b/deprecated/guardduty/common/providers.tf new file mode 100644 index 000000000..45d458575 --- /dev/null +++ b/deprecated/guardduty/common/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = module.iam_roles.terraform_role_arn + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/deprecated/guardduty/common/remote-state.tf b/deprecated/guardduty/common/remote-state.tf new file mode 100644 index 000000000..5595945d0 --- /dev/null +++ b/deprecated/guardduty/common/remote-state.tf @@ -0,0 +1,12 @@ +module "account_map" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.4.2" + + component = "account-map" + tenant = (var.account_map_tenant != "") ? var.account_map_tenant : module.this.tenant + stage = var.root_account_stage + environment = var.global_environment + privileged = var.privileged + + context = module.this.context +} diff --git a/deprecated/guardduty/root/README.md b/deprecated/guardduty/root/README.md new file mode 100644 index 000000000..eb9b5c914 --- /dev/null +++ b/deprecated/guardduty/root/README.md @@ -0,0 +1,98 @@ +# Component: `guardduty/root` + +This component should be used in tandem with the [guardduty/common](../common/) component. Please take a look at [guardduty/common/README](../common/README.md) for more information about GuardDuty and deployment steps. + +This component is responsible for delegating the AWS GuardDuty administrator accounts to the appropriate account(s). It should be deployed to every region for the root account in the AWS Organization. + +## Usage + +**Stack Level**: Regional + +The example snippet below shows how to use this component: + +```yaml +components: + terraform: + guardduty/root: + metadata: + component: guardduty/root + vars: + enabled: true + account_map_tenant: core + administrator_account: core-security +``` + +## Deployment + +Please see instructions in [guardduty/common/README](../common/README.md) for information on how to deploy both components. 
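+
+For reference, the heart of this component is small. Below is a simplified sketch of what `main.tf` (shown
+later in this diff) does when `administrator_account` is set; the real code resolves the account ID from the
+`account-map` remote state and guards both resources with `count`:
+
+```hcl
+resource "aws_guardduty_organization_admin_account" "this" {
+  # Delegate GuardDuty administration for the AWS Organization to the chosen account (e.g. core-security)
+  admin_account_id = local.account_map[var.administrator_account]
+}
+
+resource "aws_guardduty_detector" "this" {
+  # Enable a GuardDuty detector (with S3 protection) in the Organization management account
+  enable = true
+
+  datasources {
+    s3_logs {
+      enable = true
+    }
+  }
+}
+```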
+ + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [awsutils](#requirement\_awsutils) | >= 0.16.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.2 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [utils](#module\_utils) | cloudposse/utils/aws | 1.3.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_guardduty_detector.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_detector) | resource | +| [aws_guardduty_organization_admin_account.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_organization_admin_account) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [account\_map\_tenant](#input\_account\_map\_tenant) | The tenant where the `account_map` component required by remote-state is deployed | `string` | `""` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [administrator\_account](#input\_administrator\_account) | The name of the account that is the GuardDuty administrator account | `string` | `null` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [global\_environment](#input\_global\_environment) | Global environment name | `string` | `"gbl"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [privileged](#input\_privileged) | True if the default provider already has access to the backend | `bool` | `false` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [root\_account\_stage](#input\_root\_account\_stage) | The stage name for the Organization root (management) account | `string` | `"root"` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +No outputs. + + +## References +* [AWS GuardDuty Documentation](https://aws.amazon.com/guardduty/) +* [Cloud Posse's upstream component](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/guardduty/root/) + +[](https://cpco.io/component) diff --git a/modules/sqs-queue/modules/terraform-aws-sqs-queue/context.tf b/deprecated/guardduty/root/context.tf similarity index 100% rename from modules/sqs-queue/modules/terraform-aws-sqs-queue/context.tf rename to deprecated/guardduty/root/context.tf diff --git a/deprecated/guardduty/root/main.tf b/deprecated/guardduty/root/main.tf new file mode 100644 index 000000000..ac36e53ba --- /dev/null +++ b/deprecated/guardduty/root/main.tf @@ -0,0 +1,29 @@ +locals { + enabled = module.this.enabled + account_map = module.account_map.outputs.full_account_map +} + +module "utils" { + source = "cloudposse/utils/aws" + version = "1.3.0" + + context = module.this.context +} + +resource "aws_guardduty_organization_admin_account" "this" { + count = local.enabled && var.administrator_account != null && var.administrator_account != "" ? 1 : 0 + + admin_account_id = local.account_map[var.administrator_account] +} + +resource "aws_guardduty_detector" "this" { + count = local.enabled && var.administrator_account != null && var.administrator_account != "" ? 1 : 0 + + enable = true + + datasources { + s3_logs { + enable = true + } + } +} diff --git a/deprecated/guardduty/root/providers.tf b/deprecated/guardduty/root/providers.tf new file mode 100644 index 000000000..45d458575 --- /dev/null +++ b/deprecated/guardduty/root/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = module.iam_roles.terraform_role_arn + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/deprecated/guardduty/root/remote-state.tf b/deprecated/guardduty/root/remote-state.tf new file mode 100644 index 000000000..5595945d0 --- /dev/null +++ b/deprecated/guardduty/root/remote-state.tf @@ -0,0 +1,12 @@ +module "account_map" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.4.2" + + component = "account-map" + tenant = (var.account_map_tenant != "") ? 
var.account_map_tenant : module.this.tenant + stage = var.root_account_stage + environment = var.global_environment + privileged = var.privileged + + context = module.this.context +} diff --git a/deprecated/guardduty/root/variables.tf b/deprecated/guardduty/root/variables.tf new file mode 100644 index 000000000..7205eb4fd --- /dev/null +++ b/deprecated/guardduty/root/variables.tf @@ -0,0 +1,34 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "account_map_tenant" { + type = string + default = "" + description = "The tenant where the `account_map` component required by remote-state is deployed" +} + +variable "root_account_stage" { + type = string + default = "root" + description = "The stage name for the Organization root (management) account" +} + +variable "global_environment" { + type = string + default = "gbl" + description = "Global environment name" +} + +variable "privileged" { + type = bool + description = "True if the default provider already has access to the backend" + default = false +} + +variable "administrator_account" { + description = "The name of the account that is the GuardDuty administrator account" + type = string + default = null +} diff --git a/deprecated/guardduty/root/versions.tf b/deprecated/guardduty/root/versions.tf new file mode 100644 index 000000000..65cf14c13 --- /dev/null +++ b/deprecated/guardduty/root/versions.tf @@ -0,0 +1,15 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + + awsutils = { + source = "cloudposse/awsutils" + version = ">= 0.16.0" + } + } +} diff --git a/deprecated/iam-primary-roles/README.md b/deprecated/iam-primary-roles/README.md index e5fedeed2..f86f57c53 100644 --- a/deprecated/iam-primary-roles/README.md +++ b/deprecated/iam-primary-roles/README.md @@ -35,15 +35,15 @@ components: # Override the default Role for accessing the backend, because SuperAdmin is not allowed to assume that role role_arn: null vars: - # Historically there was a practical difference between the Primary Roles defined in - # `primary_roles_config` and the Delegated Roles defined in `delegated_roles_config`, + # Historically there was a practical difference between the Primary Roles defined in + # `primary_roles_config` and the Delegated Roles defined in `delegated_roles_config`, # but now the difference is mainly for documentation and bookkeeping. - + # `primary_roles_config` is for roles that only appear in the identity account. # A role in the identity account should be thought of as an IAM access group. # By giving someone access to an identity account role, you are actually # giving them access to a set of roles in a set of accounts. - + # delegated_roles_config is for roles that appear in all (or most) accounts. # Delegated roles correspond more closely to job functions. 
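For reference alongside the deprecated `guardduty/root` component above, the snippet below is a minimal, illustrative atmos stack configuration built only from the variables declared in its `variables.tf`. The `administrator_account` and `account_map_tenant` values are assumptions for the example, not values taken from this repository.

```yaml
components:
  terraform:
    guardduty/root:
      vars:
        enabled: true
        # Account (by its account-map name) to designate as the GuardDuty
        # administrator; "core-security" is an assumed example value.
        administrator_account: core-security
        # Tenant where the account-map component is deployed (assumed).
        account_map_tenant: core
        root_account_stage: root
        global_environment: gbl
        privileged: false
```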
diff --git a/deprecated/iam-primary-roles/remote-state.tf b/deprecated/iam-primary-roles/remote-state.tf index 059497226..9adfe23e8 100644 --- a/deprecated/iam-primary-roles/remote-state.tf +++ b/deprecated/iam-primary-roles/remote-state.tf @@ -21,4 +21,3 @@ module "account_map" { context = module.this.context } - diff --git a/deprecated/securityhub/securityhub/common/README.md b/deprecated/securityhub/securityhub/common/README.md new file mode 100644 index 000000000..7ca77c542 --- /dev/null +++ b/deprecated/securityhub/securityhub/common/README.md @@ -0,0 +1,180 @@ +# Component: `securityhub/common` + +This component is responsible for configuring Security Hub and it should be used in tandem with the [securityhub/root](../root) component. + +Amazon Security Hub enables users to centrally manage and monitor the security and compliance of their AWS accounts and resources. It aggregates, organizes, and prioritizes security findings from various AWS services, third-party tools, and integrated partner solutions. + +Here are the key features and capabilities of Amazon Security Hub: + +- Centralized security management: Security Hub provides a centralized dashboard where users can view and manage security findings from multiple AWS accounts and regions. This allows for a unified view of the security posture across the entire AWS environment. + +- Automated security checks: Security Hub automatically performs continuous security checks on AWS resources, configurations, and security best practices. It leverages industry standards and compliance frameworks, such as AWS CIS Foundations Benchmark, to identify potential security issues. + +- Integrated partner solutions: Security Hub integrates with a wide range of AWS native services, as well as third-party security products and solutions. This integration enables the ingestion and analysis of security findings from diverse sources, offering a comprehensive security view. + +- Security standards and compliance: Security Hub provides compliance checks against industry standards and regulatory frameworks, such as PCI DSS, HIPAA, and GDPR. It identifies non-compliant resources and provides guidance on remediation actions to ensure adherence to security best practices. + +- Prioritized security findings: Security Hub analyzes and prioritizes security findings based on severity, enabling users to focus on the most critical issues. It assigns severity levels and generates a consolidated view of security alerts, allowing for efficient threat response and remediation. + +- Custom insights and event aggregation: Security Hub supports custom insights, allowing users to create their own rules and filters to focus on specific security criteria or requirements. It also provides event aggregation and correlation capabilities to identify related security findings and potential attack patterns. + +- Integration with other AWS services: Security Hub seamlessly integrates with other AWS services, such as AWS CloudTrail, Amazon GuardDuty, AWS Config, and AWS IAM Access Analyzer. This integration allows for enhanced visibility, automated remediation, and streamlined security operations. + +- Alert notifications and automation: Security Hub supports alert notifications through Amazon SNS, enabling users to receive real-time notifications of security findings. It also facilitates automation and response through integration with AWS Lambda, allowing for automated remediation actions. 
+ +By utilizing Amazon Security Hub, organizations can improve their security posture, gain insights into security risks, and effectively manage security compliance across their AWS accounts and resources. + +## Usage + +**Stack Level**: Regional + +The example snippet below shows how to use this component: + +```yaml +components: + terraform: + securityhub/common: + metadata: + component: securityhub/common + vars: + enabled: true + account_map_tenant: core + central_resource_collector_account: core-security + admin_delegated: false + central_resource_collector_region: us-east-1 + finding_aggregator_enabled: true + create_sns_topic: true + enable_default_standards: false + enabled_standards: + - standards/cis-aws-foundations-benchmark/v/1.4.0 +``` + +## Deployment + +1. Apply `securityhub/common` to all accounts +2. Apply `securityhub/root` to `core-root` account +3. Apply `securityhub/common` to `core-security` with `var.admin_delegated = true` + +Example: + +``` +export regions="use1 use2 usw1 usw2 aps1 apne3 apne2 apne1 apse1 apse2 cac1 euc1 euw1 euw2 euw3 eun1 sae1" + +# apply to core-* + +export stages="artifacts audit auto corp dns identity network security" +for region in ${regions}; do + for stage in ${stages}; do + atmos terraform deploy securityhub/common-${region} -s core-${region}-${stage} || echo "core-${region}-${stage}" >> failures; + done; +done + +# apply to plat-* + +export stages="dev prod sandbox staging" +for region in ${regions}; do + for stage in ${stages}; do + atmos terraform deploy securityhub/common-${region} -s plat-${region}-${stage} || echo "plat-${region}-${stage}" >> failures; + done; +done + +# apply to "core-root" using "superadmin" privileges + +for region in ${regions}; do + atmos terraform deploy securityhub/root-${region} -s core-${region}-root || echo "core-${region}-root" >> failures; +done + +# apply to "core-security" again with "var.admin_delegated=true" + +for region in ${regions}; do + atmos terraform deploy securityhub/common-${region} -s core-${region}-security -var=admin_delegated=true || echo "core-${region}-security" >> failures; +done +``` + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [awsutils](#requirement\_awsutils) | >= 0.16.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | +| [awsutils](#provider\_awsutils) | >= 0.16.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.2 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [security\_hub](#module\_security\_hub) | cloudposse/security-hub/aws | 0.10.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_securityhub_account.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/securityhub_account) | resource | +| [aws_securityhub_standards_subscription.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/securityhub_standards_subscription) | resource | +| [awsutils_security_hub_organization_settings.this](https://registry.terraform.io/providers/cloudposse/awsutils/latest/docs/resources/security_hub_organization_settings) | resource | +| 
[aws_caller_identity.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | +| [aws_partition.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | +| [aws_region.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [account\_map\_tenant](#input\_account\_map\_tenant) | The tenant where the `account_map` component required by remote-state is deployed | `string` | `""` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [admin\_delegated](#input\_admin\_delegated) | A flag to indicate if the Security Hub Administrator account has been designated from the root account.

This component should be applied with this variable set to `false`, then the securityhub/root component should be applied
to designate the administrator account, then this component should be applied again with this variable set to `true`. | `bool` | `false` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [central\_resource\_collector\_account](#input\_central\_resource\_collector\_account) | The name of the account that is the centralized aggregation account | `string` | n/a | yes | +| [central\_resource\_collector\_region](#input\_central\_resource\_collector\_region) | The region that collects findings | `string` | n/a | yes | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [create\_sns\_topic](#input\_create\_sns\_topic) | Flag to indicate whether an SNS topic should be created for notifications | `bool` | `false` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to the `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enable\_default\_standards](#input\_enable\_default\_standards) | Flag to indicate whether default standards should be enabled | `bool` | `true` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [enabled\_standards](#input\_enabled\_standards) | A list of standards to enable in the account.

For example:
- standards/aws-foundational-security-best-practices/v/1.0.0
- ruleset/cis-aws-foundations-benchmark/v/1.2.0
- standards/pci-dss/v/3.2.1
- standards/cis-aws-foundations-benchmark/v/1.4.0 | `set(string)` | `[]` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [finding\_aggregator\_enabled](#input\_finding\_aggregator\_enabled) | Flag to indicate whether a finding aggregator should be created

If you want to aggregate findings from more than one region, set this to `true`.

For more information, see:
https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/securityhub_finding_aggregator | `bool` | `false` | no | +| [finding\_aggregator\_linking\_mode](#input\_finding\_aggregator\_linking\_mode) | Linking mode to use for the finding aggregator.

The possible values are:
- `ALL_REGIONS` - Aggregate from all regions
- `ALL_REGIONS_EXCEPT_SPECIFIED` - Aggregate from all regions except those specified in `var.finding_aggregator_regions`
- `SPECIFIED_REGIONS` - Aggregate from regions specified in `var.finding_aggregator_regions` | `string` | `"ALL_REGIONS"` | no | +| [finding\_aggregator\_regions](#input\_finding\_aggregator\_regions) | A list of regions to aggregate findings from.

This is only used if `finding_aggregator_enabled` is `true`. | `any` | `null` | no | +| [global\_environment](#input\_global\_environment) | Global environment name | `string` | `"gbl"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [privileged](#input\_privileged) | True if the default provider already has access to the backend | `bool` | `false` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [root\_account\_stage](#input\_root\_account\_stage) | The stage name for the Organization root (management) account | `string` | `"root"` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [enabled\_subscriptions](#output\_enabled\_subscriptions) | A list of subscriptions that have been enabled | +| [sns\_topic\_name](#output\_sns\_topic\_name) | The SNS topic name that was created | +| [sns\_topic\_subscriptions](#output\_sns\_topic\_subscriptions) | The SNS topic subscriptions | + + +## References +* [AWS Security Hub Documentation](https://aws.amazon.com/security-hub/) +* [Cloud Posse's upstream component](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/securityhub/common/) + +[](https://cpco.io/component) diff --git a/modules/sso/context.tf b/deprecated/securityhub/securityhub/common/context.tf similarity index 100% rename from modules/sso/context.tf rename to deprecated/securityhub/securityhub/common/context.tf diff --git a/deprecated/securityhub/securityhub/common/main.tf b/deprecated/securityhub/securityhub/common/main.tf new file mode 100644 index 000000000..cb155dd9a --- /dev/null +++ b/deprecated/securityhub/securityhub/common/main.tf @@ -0,0 +1,60 @@ +locals { + enabled = module.this.enabled + account_map = module.account_map.outputs.full_account_map + central_resource_collector_account = local.account_map[var.central_resource_collector_account] + account_id = one(data.aws_caller_identity.this[*].account_id) + region_name = one(data.aws_region.this[*].name) + is_global_collector_account = local.central_resource_collector_account == local.account_id + is_collector_region = local.region_name == var.central_resource_collector_region + member_account_list = [for a in keys(local.account_map) : (local.account_map[a]) if local.account_map[a] != local.account_id] + enabled_standards_arns = toset([ + for standard in var.enabled_standards : + format("arn:%s:securityhub:%s::%s", one(data.aws_partition.this[*].partition), length(regexall("ruleset", standard)) == 0 ? one(data.aws_region.this[*].name) : "", standard) + ]) +} + +data "aws_caller_identity" "this" { + count = local.enabled ? 1 : 0 +} + +data "aws_region" "this" { + count = local.enabled ? 1 : 0 +} + +data "aws_partition" "this" { + count = local.enabled ? 1 : 0 +} + +module "security_hub" { + count = local.enabled && local.is_global_collector_account ? 1 : 0 + source = "cloudposse/security-hub/aws" + version = "0.10.0" + + create_sns_topic = var.create_sns_topic + enabled_standards = var.enabled_standards + finding_aggregator_enabled = local.is_collector_region && var.finding_aggregator_enabled + finding_aggregator_linking_mode = var.finding_aggregator_linking_mode + finding_aggregator_regions = var.finding_aggregator_regions + enable_default_standards = var.enable_default_standards + + context = module.this.context +} + +resource "aws_securityhub_account" "this" { + count = local.enabled && !local.is_global_collector_account ? 1 : 0 + + enable_default_standards = var.enable_default_standards +} + +resource "aws_securityhub_standards_subscription" "this" { + for_each = local.enabled && !local.is_global_collector_account ? 
local.enabled_standards_arns : [] + depends_on = [aws_securityhub_account.this] + standards_arn = each.key +} + +resource "awsutils_security_hub_organization_settings" "this" { + count = local.enabled && local.is_global_collector_account && var.admin_delegated ? 1 : 0 + + member_accounts = local.member_account_list + auto_enable_new_accounts = true +} diff --git a/deprecated/securityhub/securityhub/common/outputs.tf b/deprecated/securityhub/securityhub/common/outputs.tf new file mode 100644 index 000000000..e96f886b2 --- /dev/null +++ b/deprecated/securityhub/securityhub/common/outputs.tf @@ -0,0 +1,14 @@ +output "enabled_subscriptions" { + description = "A list of subscriptions that have been enabled" + value = local.enabled && local.is_global_collector_account ? module.security_hub[0].enabled_subscriptions : [] +} + +output "sns_topic_name" { + description = "The SNS topic name that was created" + value = local.enabled && local.is_global_collector_account && var.create_sns_topic ? module.security_hub[0].sns_topic.name : null +} + +output "sns_topic_subscriptions" { + description = "The SNS topic subscriptions" + value = local.enabled && local.is_global_collector_account && var.create_sns_topic ? module.security_hub[0].sns_topic_subscriptions : null +} diff --git a/deprecated/securityhub/securityhub/common/providers.tf b/deprecated/securityhub/securityhub/common/providers.tf new file mode 100644 index 000000000..45d458575 --- /dev/null +++ b/deprecated/securityhub/securityhub/common/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = module.iam_roles.terraform_role_arn + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/deprecated/securityhub/securityhub/common/remote-state.tf b/deprecated/securityhub/securityhub/common/remote-state.tf new file mode 100644 index 000000000..5595945d0 --- /dev/null +++ b/deprecated/securityhub/securityhub/common/remote-state.tf @@ -0,0 +1,12 @@ +module "account_map" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.4.2" + + component = "account-map" + tenant = (var.account_map_tenant != "") ? 
var.account_map_tenant : module.this.tenant + stage = var.root_account_stage + environment = var.global_environment + privileged = var.privileged + + context = module.this.context +} diff --git a/deprecated/securityhub/securityhub/common/variables.tf b/deprecated/securityhub/securityhub/common/variables.tf new file mode 100644 index 000000000..476ee5c10 --- /dev/null +++ b/deprecated/securityhub/securityhub/common/variables.tf @@ -0,0 +1,112 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "account_map_tenant" { + type = string + default = "" + description = "The tenant where the `account_map` component required by remote-state is deployed" +} + +variable "root_account_stage" { + type = string + default = "root" + description = "The stage name for the Organization root (management) account" +} + +variable "global_environment" { + type = string + default = "gbl" + description = "Global environment name" +} + +variable "privileged" { + type = bool + description = "True if the default provider already has access to the backend" + default = false +} + +variable "central_resource_collector_account" { + description = "The name of the account that is the centralized aggregation account" + type = string +} + +variable "central_resource_collector_region" { + description = "The region that collects findings" + type = string +} + +variable "create_sns_topic" { + description = "Flag to indicate whether an SNS topic should be created for notifications" + type = bool + default = false +} + +variable "enable_default_standards" { + description = "Flag to indicate whether default standards should be enabled" + type = bool + default = true +} + +variable "enabled_standards" { + description = < +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [awsutils](#requirement\_awsutils) | >= 0.16.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.2 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_securityhub_account.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/securityhub_account) | resource | +| [aws_securityhub_organization_admin_account.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/securityhub_organization_admin_account) | resource | +| [aws_securityhub_standards_subscription.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/securityhub_standards_subscription) | resource | +| [aws_partition.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | +| [aws_region.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [account\_map\_tenant](#input\_account\_map\_tenant) | The tenant where the `account_map` component required by remote-state is deployed | `string` | `""` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in 
`tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [administrator\_account](#input\_administrator\_account) | The name of the account that is the Security Hub administrator account | `string` | `null` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enable\_default\_standards](#input\_enable\_default\_standards) | Flag to indicate whether default standards should be enabled | `bool` | `true` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [enabled\_standards](#input\_enabled\_standards) | A list of standards to enable in the account.

For example:
- standards/aws-foundational-security-best-practices/v/1.0.0
- ruleset/cis-aws-foundations-benchmark/v/1.2.0
- standards/pci-dss/v/3.2.1
- standards/cis-aws-foundations-benchmark/v/1.4.0 | `set(string)` | `[]` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [global\_environment](#input\_global\_environment) | Global environment name | `string` | `"gbl"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [privileged](#input\_privileged) | True if the default provider already has access to the backend | `bool` | `false` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [root\_account\_stage](#input\_root\_account\_stage) | The stage name for the Organization root (management) account | `string` | `"root"` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +No outputs. + + +## References +* [AWS Security Hub Documentation](https://aws.amazon.com/security-hub/) +* [Cloud Posse's upstream component](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/securityhub/root/) + +[](https://cpco.io/component) diff --git a/modules/sso/modules/okta-user/context.tf b/deprecated/securityhub/securityhub/root/context.tf similarity index 100% rename from modules/sso/modules/okta-user/context.tf rename to deprecated/securityhub/securityhub/root/context.tf diff --git a/deprecated/securityhub/securityhub/root/main.tf b/deprecated/securityhub/securityhub/root/main.tf new file mode 100644 index 000000000..a048d23df --- /dev/null +++ b/deprecated/securityhub/securityhub/root/main.tf @@ -0,0 +1,38 @@ +locals { + enabled = module.this.enabled + account_map = module.account_map.outputs.full_account_map + enabled_standards_arns = toset([ + for standard in var.enabled_standards : + format("arn:%s:securityhub:%s::%s", one(data.aws_partition.this[*].partition), length(regexall("ruleset", standard)) == 0 ? one(data.aws_region.this[*].name) : "", standard) + ]) +} + +data "aws_partition" "this" { + count = local.enabled ? 1 : 0 +} + +data "aws_region" "this" { + count = local.enabled ? 1 : 0 +} + +resource "aws_securityhub_organization_admin_account" "this" { + count = local.enabled && var.administrator_account != null && var.administrator_account != "" ? 1 : 0 + + admin_account_id = local.account_map[var.administrator_account] +} + +resource "aws_securityhub_account" "this" { + count = local.enabled ? 1 : 0 + + enable_default_standards = var.enable_default_standards + + depends_on = [ + aws_securityhub_organization_admin_account.this + ] +} + +resource "aws_securityhub_standards_subscription" "this" { + for_each = local.enabled ? local.enabled_standards_arns : [] + depends_on = [aws_securityhub_account.this] + standards_arn = each.key +} diff --git a/deprecated/securityhub/securityhub/root/providers.tf b/deprecated/securityhub/securityhub/root/providers.tf new file mode 100644 index 000000000..45d458575 --- /dev/null +++ b/deprecated/securityhub/securityhub/root/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = module.iam_roles.terraform_role_arn + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/deprecated/securityhub/securityhub/root/remote-state.tf b/deprecated/securityhub/securityhub/root/remote-state.tf new file mode 100644 index 000000000..5595945d0 --- /dev/null +++ b/deprecated/securityhub/securityhub/root/remote-state.tf @@ -0,0 +1,12 @@ +module "account_map" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.4.2" + + component = "account-map" + tenant = (var.account_map_tenant != "") ? 
var.account_map_tenant : module.this.tenant + stage = var.root_account_stage + environment = var.global_environment + privileged = var.privileged + + context = module.this.context +} diff --git a/deprecated/securityhub/securityhub/root/variables.tf b/deprecated/securityhub/securityhub/root/variables.tf new file mode 100644 index 000000000..07027cc2a --- /dev/null +++ b/deprecated/securityhub/securityhub/root/variables.tf @@ -0,0 +1,54 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "account_map_tenant" { + type = string + default = "" + description = "The tenant where the `account_map` component required by remote-state is deployed" +} + +variable "root_account_stage" { + type = string + default = "root" + description = "The stage name for the Organization root (management) account" +} + +variable "global_environment" { + type = string + default = "gbl" + description = "Global environment name" +} + +variable "privileged" { + type = bool + description = "True if the default provider already has access to the backend" + default = false +} + +variable "administrator_account" { + description = "The name of the account that is the Security Hub administrator account" + type = string + default = null +} + +variable "enable_default_standards" { + description = "Flag to indicate whether default standards should be enabled" + type = bool + default = true +} + +variable "enabled_standards" { + description = < +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3 | +| [http](#requirement\_http) | >= 3.0 | +| [spacelift](#requirement\_spacelift) | >= 0.1.31 | + +## Providers + +| Name | Version | +|------|---------| +| [http](#provider\_http) | >= 3.0 | +| [spacelift](#provider\_spacelift) | >= 0.1.31 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [spacelift_policy.default](https://registry.terraform.io/providers/spacelift-io/spacelift/latest/docs/resources/policy) | resource | +| [http_http.default](https://registry.terraform.io/providers/hashicorp/http/latest/docs/data-sources/http) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels](#input\_labels) | List of global labels to add to each policy. These values can be overridden in `var.policies`'s per policy `labels` key. | `list(string)` | `[]` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [policies](#input\_policies) | The map of required policies to add. | `any` | n/a | yes | +| [policy\_version](#input\_policy\_version) | The optional global policy version injected using a %s in each `body_url`. This can be pinned to a version tag or a branch. | `string` | `"master"` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [space\_id](#input\_space\_id) | The global `space_id` to assign to each policy. This value can be overridden in `var.policies`'s per policy `space_id` key. | `string` | `"root"` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [policies](#output\_policies) | All calculated policies | + + +## References + +* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/spacelift-policy) - Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/tgw/cross-region-spoke/context.tf b/deprecated/spacelift-policy/context.tf similarity index 100% rename from modules/tgw/cross-region-spoke/context.tf rename to deprecated/spacelift-policy/context.tf diff --git a/deprecated/spacelift-policy/main.tf b/deprecated/spacelift-policy/main.tf new file mode 100644 index 000000000..b665cb5b8 --- /dev/null +++ b/deprecated/spacelift-policy/main.tf @@ -0,0 +1,113 @@ +locals { + enabled = module.this.enabled + + # Get all policies without a URL + # { k = { "body": "https://", etc } } + policies_with_body = { + for k, v in var.policies : + # merge them with existing data structure + k => merge(v, { + # append body_append to each body if one exists + "body" = format( + join("\n", [ + "# NOTE: source of policy is in the stack YAML", + "", + "%s", + ]), + lookup(v, "body", file("${path.module}/${lookup(v, "body_path")}")), + ) + }) + if lookup(v, "body", null) != null || lookup(v, "body_path", null) != null + } + + # Get all policies with a URL + # { k = "https://" } + policies_with_body_url = { + for k, v in var.policies : + k => try( + format(v["body_url"], var.policy_version), + v["body_url"], + ) + if lookup(v, "body_url", null) != null + } + + # After downloading the bodies from the URLs + # { k = { "body" = "...", etc } } + policies_with_body_url_downloaded = { + for k, v in local.policies_with_body_url : + # merge them with existing data structure + k => merge(var.policies[k], { + # append body_append to each body if one exists + "body" = format( + join("\n", [ + "# NOTE: source url of policy: %s", + "", + "%s", + ]), + v, + data.http.default[k]["body"], + ) + }) if local.enabled + } + + # TODO: get local policies + + # Merge all the policies together and create policies from this object + all_policies = merge( + local.policies_with_body, + local.policies_with_body_url_downloaded, + ) + + # keep the object keys consistent to avoid terraform errors + policies = { + for k, v in local.all_policies : + k => merge( + # remove optional keys + { + for vk, vv in v : + vk => vv + if !contains([ + "name", + "body_append", + "body_url", + "body_path", + "labels", + "space_id", + ], vk) + }, + # these were previously set in the spacelift_policy resource and moved here + # to avoid terraform errors around inconsistent object keys + { + name = lookup(v, "name", title(join(" ", split("-", k)))) + labels = lookup(v, "labels", var.labels) + space_id = lookup(v, "space_id", var.space_id) + body = lookup(v, "body_append", "") == "" ? v["body"] : format( + join("\n", [ + "%s", + "", + "# NOTE: below is appended to the original policy", + "", + "%s", + ]), + v["body"], + lookup(v, "body_append", "") + ) + }, + ) + } +} + +data "http" "default" { + for_each = local.enabled ? local.policies_with_body_url : {} + url = each.value +} + +resource "spacelift_policy" "default" { + for_each = local.enabled ? 
local.policies : {} + + name = lookup(each.value, "name") + body = lookup(each.value, "body") + type = upper(lookup(each.value, "type")) + labels = lookup(each.value, "labels") + space_id = lookup(each.value, "space_id") +} diff --git a/deprecated/spacelift-policy/outputs.tf b/deprecated/spacelift-policy/outputs.tf new file mode 100644 index 000000000..62b96771a --- /dev/null +++ b/deprecated/spacelift-policy/outputs.tf @@ -0,0 +1,4 @@ +output "policies" { + description = "All calculated policies" + value = local.policies +} diff --git a/deprecated/spacelift-policy/policies/example.trigger.administrative.rego b/deprecated/spacelift-policy/policies/example.trigger.administrative.rego new file mode 100644 index 000000000..2a9760596 --- /dev/null +++ b/deprecated/spacelift-policy/policies/example.trigger.administrative.rego @@ -0,0 +1,21 @@ +# Local example policy taken from https://raw.githubusercontent.com/cloudposse/terraform-spacelift-cloud-infrastructure-automation/%s/catalog/policies/trigger.administrative.rego + +# https://www.openpolicyagent.org/docs/latest/policy-reference/#builtin-strings-stringsany_prefix_match + +package spacelift + +# Trigger the stack after it gets created in the `administrative` stack +trigger[stack.id] { + stack := input.stacks[_] + # compare a plaintext string (stack.id) to a checksum + strings.any_suffix_match(crypto.sha256(stack.id), id_shas_of_created_stacks) +} + +id_shas_of_created_stacks[change.entity.data.values.id] { + change := input.run.changes[_] + change.action == "added" + change.entity.type == "spacelift_stack" + change.phase == "apply" # The change has actually been applied, not just planned +} + +sample { true } diff --git a/deprecated/spacelift-policy/policies/plan.autodeployupdates.rego b/deprecated/spacelift-policy/policies/plan.autodeployupdates.rego new file mode 100644 index 000000000..43a78ceba --- /dev/null +++ b/deprecated/spacelift-policy/policies/plan.autodeployupdates.rego @@ -0,0 +1,21 @@ +package spacelift + +# This policy allows autodeploy if there are only new resources or updates. +# It requires manual intervention (approval) if any of the resources will be deleted. + +# Usage: +# settings: +# spacelift: +# autodeploy: true +# policies_by_name_enabled: +# - plan.autodeployupdates + +warn[sprintf(message, [action, resource.address])] { + message := "action '%s' requires human review (%s)" + review := {"delete"} + + resource := input.terraform.resource_changes[_] + action := resource.change.actions[_] + + review[action] +} diff --git a/deprecated/spacelift-policy/providers.tf b/deprecated/spacelift-policy/providers.tf new file mode 100644 index 000000000..341f51d33 --- /dev/null +++ b/deprecated/spacelift-policy/providers.tf @@ -0,0 +1,5 @@ +# Purposely did not add spacelift inputs since this is a sensitive change and +# it should not be an easy thing to plan this locally. Best to use the exported +# inputs when this is necessary to plan locally. + +provider "spacelift" {} diff --git a/deprecated/spacelift-policy/variables.tf b/deprecated/spacelift-policy/variables.tf new file mode 100644 index 000000000..1fe8defee --- /dev/null +++ b/deprecated/spacelift-policy/variables.tf @@ -0,0 +1,30 @@ +# This input is unused however, this is added by default to every component by atmos +# and this is defined to avoid any `var.region` warnings. 
+# tflint-ignore: terraform_unused_declarations +variable "region" { + type = string + description = "AWS Region" +} + +variable "policy_version" { + type = string + description = "The optional global policy version injected using a %s in each `body_url`. This can be pinned to a version tag or a branch." + default = "master" +} + +variable "policies" { + type = any + description = "The map of required policies to add." +} + +variable "labels" { + type = list(string) + description = "List of global labels to add to each policy. These values can be overridden in `var.policies`'s per policy `labels` key." + default = [] +} + +variable "space_id" { + type = string + description = "The global `space_id` to assign to each policy. This value can be overridden in `var.policies`'s per policy `space_id` key." + default = "root" +} diff --git a/deprecated/spacelift-policy/versions.tf b/deprecated/spacelift-policy/versions.tf new file mode 100644 index 000000000..136ee3428 --- /dev/null +++ b/deprecated/spacelift-policy/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.3" + + required_providers { + spacelift = { + source = "spacelift-io/spacelift" + version = ">= 0.1.31" + } + http = { + source = "hashicorp/http" + version = ">= 3.0" + } + } +} diff --git a/modules/spacelift-worker-pool/README.md b/deprecated/spacelift-worker-pool/README.md similarity index 99% rename from modules/spacelift-worker-pool/README.md rename to deprecated/spacelift-worker-pool/README.md index e72ff71bd..2fa8022a0 100644 --- a/modules/spacelift-worker-pool/README.md +++ b/deprecated/spacelift-worker-pool/README.md @@ -96,14 +96,14 @@ the output to the `trusted_role_arns` list for the `spacelift` role in `aws-team | Name | Source | Version | |------|--------|---------| -| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.1.0 | -| [autoscale\_group](#module\_autoscale\_group) | cloudposse/ec2-autoscale-group/aws | 0.30.1 | -| [ecr](#module\_ecr) | cloudposse/stack-config/yaml//modules/remote-state | 1.1.0 | +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | +| [autoscale\_group](#module\_autoscale\_group) | cloudposse/ec2-autoscale-group/aws | 0.34.1 | +| [ecr](#module\_ecr) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | | [iam\_label](#module\_iam\_label) | cloudposse/label/null | 0.25.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [security\_group](#module\_security\_group) | cloudposse/security-group/aws | 2.0.0-rc1 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.1.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | ## Resources diff --git a/modules/tgw/cross-region-spoke/modules/tgw_routes/context.tf b/deprecated/spacelift-worker-pool/context.tf similarity index 100% rename from modules/tgw/cross-region-spoke/modules/tgw_routes/context.tf rename to deprecated/spacelift-worker-pool/context.tf diff --git a/modules/spacelift-worker-pool/data.tf b/deprecated/spacelift-worker-pool/data.tf similarity index 91% rename from modules/spacelift-worker-pool/data.tf rename to deprecated/spacelift-worker-pool/data.tf index 7cf475a29..b069e0725 100644 --- a/modules/spacelift-worker-pool/data.tf +++ b/deprecated/spacelift-worker-pool/data.tf @@ -31,4 +31,9 @@ data "aws_ami" "spacelift" { name = "virtualization-type" values = ["hvm"] } + + 
filter { + name = "architecture" + values = ["x86_64"] + } } diff --git a/modules/spacelift-worker-pool/iam.tf b/deprecated/spacelift-worker-pool/iam.tf similarity index 100% rename from modules/spacelift-worker-pool/iam.tf rename to deprecated/spacelift-worker-pool/iam.tf diff --git a/modules/spacelift-worker-pool/main.tf b/deprecated/spacelift-worker-pool/main.tf similarity index 99% rename from modules/spacelift-worker-pool/main.tf rename to deprecated/spacelift-worker-pool/main.tf index 5138fa74f..39d7ce2f0 100644 --- a/modules/spacelift-worker-pool/main.tf +++ b/deprecated/spacelift-worker-pool/main.tf @@ -86,7 +86,7 @@ module "security_group" { module "autoscale_group" { source = "cloudposse/ec2-autoscale-group/aws" - version = "0.30.1" + version = "0.34.1" image_id = var.spacelift_ami_id == null ? join("", data.aws_ami.spacelift.*.image_id) : var.spacelift_ami_id instance_type = var.instance_type diff --git a/modules/spacelift-worker-pool/outputs.tf b/deprecated/spacelift-worker-pool/outputs.tf similarity index 100% rename from modules/spacelift-worker-pool/outputs.tf rename to deprecated/spacelift-worker-pool/outputs.tf diff --git a/modules/spacelift-worker-pool/providers.tf b/deprecated/spacelift-worker-pool/providers.tf similarity index 100% rename from modules/spacelift-worker-pool/providers.tf rename to deprecated/spacelift-worker-pool/providers.tf diff --git a/modules/spacelift-worker-pool/remote-state.tf b/deprecated/spacelift-worker-pool/remote-state.tf similarity index 93% rename from modules/spacelift-worker-pool/remote-state.tf rename to deprecated/spacelift-worker-pool/remote-state.tf index 1e56e3749..5d78adb0d 100644 --- a/modules/spacelift-worker-pool/remote-state.tf +++ b/deprecated/spacelift-worker-pool/remote-state.tf @@ -1,6 +1,6 @@ module "account_map" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.1.0" + version = "1.4.1" component = "account-map" environment = coalesce(var.account_map_environment_name, module.this.environment) @@ -12,7 +12,7 @@ module "account_map" { module "ecr" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.1.0" + version = "1.4.1" component = "ecr" environment = coalesce(var.ecr_environment_name, module.this.environment) @@ -24,7 +24,7 @@ module "ecr" { module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.1.0" + version = "1.4.1" component = "vpc" diff --git a/modules/spacelift-worker-pool/templates/spacelift@.service b/deprecated/spacelift-worker-pool/templates/spacelift@.service similarity index 100% rename from modules/spacelift-worker-pool/templates/spacelift@.service rename to deprecated/spacelift-worker-pool/templates/spacelift@.service diff --git a/modules/spacelift-worker-pool/templates/user-data.sh b/deprecated/spacelift-worker-pool/templates/user-data.sh similarity index 100% rename from modules/spacelift-worker-pool/templates/user-data.sh rename to deprecated/spacelift-worker-pool/templates/user-data.sh diff --git a/modules/spacelift-worker-pool/variables.tf b/deprecated/spacelift-worker-pool/variables.tf similarity index 100% rename from modules/spacelift-worker-pool/variables.tf rename to deprecated/spacelift-worker-pool/variables.tf diff --git a/modules/spacelift-worker-pool/versions.tf b/deprecated/spacelift-worker-pool/versions.tf similarity index 100% rename from modules/spacelift-worker-pool/versions.tf rename to deprecated/spacelift-worker-pool/versions.tf diff --git a/deprecated/spacelift/README.md 
b/deprecated/spacelift/README.md new file mode 100644 index 000000000..993d9e337 --- /dev/null +++ b/deprecated/spacelift/README.md @@ -0,0 +1,444 @@ +# Component: `spacelift` + +This component is responsible for provisioning Spacelift stacks. + +Spacelift is a specialized, Terraform-compatible continuous integration and deployment (CI/CD) platform for +infrastructure-as-code. It's designed and implemented by long-time DevOps practitioners based on previous experience with +large-scale installations - dozens of teams, hundreds of engineers and tens of thousands of cloud resources. + +## Usage + +**Stack Level**: Regional + +This component provisions an administrative Spacelift stack and assigns it to a worker pool. Although +the stack can manage stacks in any region, it should be provisioned in the same region as the worker pool. + +```yaml +components: + terraform: + spacelift/defaults: + metadata: + type: abstract + component: spacelift + settings: + spacelift: + workspace_enabled: true + administrative: true + autodeploy: true + before_init: + - spacelift-configure + - spacelift-write-vars + - spacelift-tf-workspace + before_plan: + - spacelift-configure + before_apply: + - spacelift-configure + component_root: components/terraform/spacelift + description: Spacelift Administrative stack + stack_destructor_enabled: false + # TODO: replace with the name of the worker pool + worker_pool_name: WORKER_POOL_NAME + repository: infra + branch: main + labels: + - folder:admin + # Do not add normal set of child policies to admin stacks + policies_enabled: [] + policies_by_id_enabled: [] + vars: + enabled: true + spacelift_api_endpoint: https://TODO.app.spacelift.io + administrative_stack_drift_detection_enabled: true + administrative_stack_drift_detection_reconcile: true + administrative_stack_drift_detection_schedule: ["0 4 * * *"] + administrative_trigger_policy_enabled: false + autodeploy: false + aws_role_enabled: false + drift_detection_enabled: true + drift_detection_reconcile: true + drift_detection_schedule: ["0 4 * * *"] + external_execution: true + git_repository: infra # TODO: replace with your repository name + git_branch: main + + # List of available default Rego policies to create in Spacelift. + # These policies are defined in the catalog https://github.com/cloudposse/terraform-spacelift-cloud-infrastructure-automation/tree/master/catalog/policies + # These policies will not be attached to Spacelift stacks by default (but will be created in Spacelift, and could be attached to a stack manually). + # For specify policies to attach to each Spacelift stack, use `var.policies_enabled`. + policies_available: + - "git_push.proposed-run" + - "git_push.tracked-run" + - "plan.default" + - "trigger.dependencies" + - "trigger.retries" + + # List of default Rego policies to attach to all Spacelift stacks. 
+ # These policies are defined in the catalog https://github.com/cloudposse/terraform-spacelift-cloud-infrastructure-automation/tree/master/catalog/policies + policies_enabled: + - "git_push.proposed-run" + - "git_push.tracked-run" + - "plan.default" + - "trigger.dependencies" + + # List of custom policy names to attach to all Spacelift stacks + # These policies must exist in `components/terraform/spacelift/rego-policies` + policies_by_name_enabled: [] + + runner_image: 000000000000.dkr.ecr.us-west-2.amazonaws.com/infra #TODO: replace with your ECR repository + spacelift_component_path: components/terraform + stack_config_path_template: stacks/%s.yaml + stack_destructor_enabled: false + worker_pool_name_id_map: + -spacelift-worker-pool: SOMEWORKERPOOLID #TODO: replace with your worker pool ID + infracost_enabled: false # TODO: decide on infracost + terraform_version: "1.3.6" + terraform_version_map: + "1": "1.3.6" + + # These could be moved to $PROJECT_ROOT/.spacelift/config.yml + before_init: + - spacelift-configure + - spacelift-write-vars + - spacelift-tf-workspace + before_plan: + - spacelift-configure + before_apply: + - spacelift-configure + + # Manages policies, admin stacks, and core OU accounts + spacelift: + metadata: + component: spacelift + inherits: + - spacelift/defaults + settings: + spacelift: + policies_by_id_enabled: + # This component also creates this policy so this is omitted prior to the first apply + # then added so it's consistent with all admin stacks. + - trigger-administrative-policy + vars: + enabled: true + # Use context_filters to split up admin stack management + # context_filters: + # stages: + # - artifacts + # - audit + # - auto + # - corp + # - dns + # - identity + # - marketplace + # - network + # - public + # - security + # These are the policies created from https://github.com/cloudposse/terraform-spacelift-cloud-infrastructure-automation/tree/master/catalog/policies + # Make sure to remove the .rego suffix + policies_available: + - git_push.proposed-run + - git_push.tracked-run + - plan.default + - trigger.dependencies + - trigger.retries + # This is to auto deploy launch template image id changes + - plan.warn-on-resource-changes-except-image-id + # This is the global admin policy + - trigger.administrative + # These are the policies added to each spacelift stack created by this admin stack + policies_enabled: + - git_push.proposed-run + - git_push.tracked-run + - plan.default + - trigger.dependencies + # Keep these empty + policies_by_id_enabled: [] + +``` + +## Prerequisites + +### GitHub Integration + +1. The GitHub owner will need to sign up for a [free trial of Spacelift](https://spacelift.io/free-trial.html) +1. Once an account is created take note of the URL - usually its `https://.app.spacelift.io/` +1. Create a Login Policy + + - Click on Policies then Add Policy + - Use the following policy and replace `GITHUBORG` with the GitHub Organization slug and DEV with the GitHub id for the Dev setting up the Spacelift module. + + ```rego + package spacelift + + # See https://docs.spacelift.io/concepts/policy/login-policy for implementation details. + # Note: Login policies don't affect GitHub organization or SSO admins. + # Note 2: Enabling SSO requires that all users have an IdP (G Suite) account, so we'll just use + # GitHub authentication in the meantime while working with external collaborators. 
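+   # The rules below define `admin`, `allow`, and `deny` decisions that Spacelift evaluates on each login, plus `space_read` (and, commented out, `space_write`) rules for space-level access.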
+ # Map session input data to human friendly variables to use in policy evaluation + + username := input.session.login + member_of := input.session.teams # Input is friendly name, e.g. "SRE" not "sre" or "@GITHUBORG/sre" + GITHUBORG := input.session.member # Is this user a member of the CUSTOMER GitHub org? + + # Define GitHub usernames of non org external collaborators with admin vs. user access + admin_collaborators := { "DEV" } + user_collaborators := { "GITHUBORG" } # Using GITHUBORG as a placeholder to avoid empty set + + # Grant admin access to GITHUBORG org members in the CloudPosse group + admin { + GITHUBORG + member_of[_] == "CloudPosse" + } + + # Grant admin access to non-GITHUBORG org accounts in the admin_collaborators set + admin { + # not GITHUBORG + admin_collaborators[username] + } + + # Grant user access to GITHUBORG org members in the Developers group + # allow { + # GITHUBORG + # member_of[_] == "Developers" + # } + + # Grant user access to non-GITHUBORG org accounts in the user_collaborators set + allow { + not GITHUBORG + user_collaborators[username] + } + + # Deny access to any non-GITHUBORG org accounts who aren't defined in external collaborators sets + deny { + not GITHUBORG + not user_collaborators[username] + not admin_collaborators[username] + } + + # Grant spaces read only user access to all members + space_read[space.id] { + space := input.spaces[_] + GITHUBORG + } + + # Grant spaces write access to GITHUBORG org members in the Developers group + # space_write[space.id] { + # space := input.spaces[_] + # member_of[_] == "Developers" + # } + ``` + +## Spacelift Layout + +[Runtime configuration](https://docs.spacelift.io/concepts/configuration/runtime-configuration) is a piece of setup +that is applied to individual runs instead of being global to the stack. +It's defined in `.spacelift/config.yml` YAML file at the root of your repository. +It is required for Spacelift to work with `atmos`. + +### Create Spacelift helper scripts + +[/rootfs/usr/local/bin/spacelift-tf-workspace](/rootfs/usr/local/bin/spacelift-tf-workspace) manages selecting or creating a Terraform workspace; similar to how `atmos` manages workspaces +during a Terraform run. + +[/rootfs/usr/local/bin/spacelift-write-vars](/rootfs/usr/local/bin/spacelift-write-vars) writes the component config using `atmos` to the `spacelift.auto.tfvars.json` file. + +**NOTE**: make sure they are all executable: + +```bash +chmod +x rootfs/usr/local/bin/spacelift* +``` + +## Bootstrapping + +After creating & linking Spacelift to this repo (see the +[docs](https://docs.spacelift.io/integrations/github)), follow these steps... + +### Deploy the [`spacelift-worker-pool`](../spacelift-worker-pool) Component + +See [`spacelift-worker-pool` README](../spacelift-worker-pool/README.md) for the configuration and deployment needs. + +### Update the `spacelift` catalog + +1. `git_repository` = Name of `infrastructure` repository +1. `git_branch` = Name of main/master branch +1. `worker_pool_name_id_map` = Map of arbitrary names to IDs Spacelift worker pools, +taken from the `worker_pool_id` output of the `spacelift-worker-pool` component. +1. Set `components.terraform.spacelift.settings.spacelift.worker_pool_name` +to the name of the worker pool you want to use for the `spacelift` component, +the name being the key you set in the `worker_pool_name_id_map` map. + + +### Deploy the admin stacks + +Set these ENV vars: + +```bash +export SPACELIFT_API_KEY_ENDPOINT=https://.app.spacelift.io +export SPACELIFT_API_KEY_ID=... 
+export SPACELIFT_API_KEY_SECRET=... +``` + +The name of the Spacelift stack resource will be different depending on the name of the component and the root atmos stack. +This would be the command if the root atmos stack is `core-gbl-auto` and the spacelift component is `spacelift`. + +``` +atmos terraform apply spacelift --stack core-gbl-auto -target 'module.spacelift.module.stacks["core-gbl-auto-spacelift"]' +``` + +Note that this is the only manual operation you need to perform in `geodesic` using `atmos` to create the initial admin stack. +All other infrastructure stacks will be created in Spacelift by this admin stack. + + +## Pull Request Workflow + +1. Create a new branch & make changes +2. Create a new pull request (targeting the `main` branch) +3. View the modified resources directly in the pull request +4. View the successful Spacelift checks in the pull request +5. Merge the pull request and check the Spacelift job + + +## spacectl + +See the docs at https://github.com/spaceone-dev/spacectl + +### Install + +``` +apt install -y spacectl -qq +``` + +Set up a profile + +``` +spacectl profile login gbl-identity +Enter Spacelift endpoint (eg. https://unicorn.app.spacelift.io/): https://.app.spacelift.io +Select credentials type: 1 for API key, 2 for GitHub access token: 1 +Enter API key ID: 01FKN... +Enter API key secret: +``` + +### Listing stacks + +```bash +spacectl stack list +``` + +Grab all the stack IDs (use the JSON output to avoid bad characters) + +```bash +spacectl stack list --output json | jq -r '.[].id' > stacks.txt +``` + +If the latest commit for each stack is desired, run something like this. + +NOTE: remove the `echo` to disable the dry-run behavior + +```bash +cat stacks.txt | while read stack; do echo $stack && echo spacectl stack set-current-commit --sha 25dd359749cfe30c76cce19f58e0a33555256afd --id $stack; done +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3 | +| [aws](#requirement\_aws) | >= 4.0 | +| [spacelift](#requirement\_spacelift) | >= 0.1.31 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [spacelift](#module\_spacelift) | cloudposse/cloud-infrastructure-automation/spacelift | 0.55.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_ssm_parameter.spacelift_key_id](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.spacelift_key_secret](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.&#10;
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [administrative\_push\_policy\_enabled](#input\_administrative\_push\_policy\_enabled) | Flag to enable/disable the global administrative push policy | `bool` | `true` | no | +| [administrative\_stack\_drift\_detection\_enabled](#input\_administrative\_stack\_drift\_detection\_enabled) | Flag to enable/disable administrative stack drift detection | `bool` | `true` | no | +| [administrative\_stack\_drift\_detection\_reconcile](#input\_administrative\_stack\_drift\_detection\_reconcile) | Flag to enable/disable administrative stack drift automatic reconciliation. If drift is detected and `reconcile` is turned on, Spacelift will create a tracked run to correct the drift | `bool` | `true` | no | +| [administrative\_stack\_drift\_detection\_schedule](#input\_administrative\_stack\_drift\_detection\_schedule) | List of cron expressions to schedule drift detection for the administrative stack | `list(string)` |
[
"0 4 * * *"
]
| no | +| [administrative\_trigger\_policy\_enabled](#input\_administrative\_trigger\_policy\_enabled) | Flag to enable/disable the global administrative trigger policy | `bool` | `true` | no | +| [attachment\_space\_id](#input\_attachment\_space\_id) | Specify the space ID for attachments (e.g. policies, contexts, etc.) | `string` | `"legacy"` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [autodeploy](#input\_autodeploy) | Default autodeploy value for all stacks created by this project | `bool` | n/a | yes | +| [aws\_role\_arn](#input\_aws\_role\_arn) | ARN of the AWS IAM role to assume and put its temporary credentials in the runtime environment | `string` | `null` | no | +| [aws\_role\_enabled](#input\_aws\_role\_enabled) | Flag to enable/disable Spacelift to use AWS STS to assume the supplied IAM role and put its temporary credentials in the runtime environment | `bool` | `false` | no | +| [aws\_role\_external\_id](#input\_aws\_role\_external\_id) | Custom external ID (works only for private workers). See https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html for more details | `string` | `null` | no | +| [aws\_role\_generate\_credentials\_in\_worker](#input\_aws\_role\_generate\_credentials\_in\_worker) | Flag to enable/disable generating AWS credentials in the private worker after assuming the supplied IAM role | `bool` | `false` | no | +| [before\_init](#input\_before\_init) | List of before-init scripts | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [context\_filters](#input\_context\_filters) | Context filters to create stacks for specific context information. Valid lists are `namespaces`, `environments`, `tenants`, `stages`. | `map(list(string))` | `{}` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [drift\_detection\_enabled](#input\_drift\_detection\_enabled) | Flag to enable/disable drift detection on the infrastructure stacks | `bool` | `true` | no | +| [drift\_detection\_reconcile](#input\_drift\_detection\_reconcile) | Flag to enable/disable infrastructure stacks drift automatic reconciliation. If drift is detected and `reconcile` is turned on, Spacelift will create a tracked run to correct the drift | `bool` | `true` | no | +| [drift\_detection\_schedule](#input\_drift\_detection\_schedule) | List of cron expressions to schedule drift detection for the infrastructure stacks | `list(string)` |
[
"0 4 * * *"
]
| no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [external\_execution](#input\_external\_execution) | Set this to true if you're calling this module from outside of a Spacelift stack (e.g. the `complete` example) | `bool` | `false` | no | +| [git\_branch](#input\_git\_branch) | The Git branch name | `string` | `"main"` | no | +| [git\_commit\_sha](#input\_git\_commit\_sha) | The commit SHA for which to trigger a run. Requires `var.spacelift_run_enabled` to be set to `true` | `string` | `null` | no | +| [git\_repository](#input\_git\_repository) | The Git repository name | `string` | n/a | yes | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [infracost\_enabled](#input\_infracost\_enabled) | Flag to enable/disable infracost. If this is enabled, it will add infracost label to each stack. See [spacelift infracost](https://docs.spacelift.io/vendors/terraform/infracost) docs for more details. | `bool` | `false` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [policies\_available](#input\_policies\_available) | List of available default policies to create in Spacelift (these policies will not be attached to Spacelift stacks by default, use `var.policies_enabled`) | `list(string)` |
[
"git_push.proposed-run",
"git_push.tracked-run",
"plan.default",
"trigger.dependencies",
"trigger.retries"
]
| no | +| [policies\_by\_id\_enabled](#input\_policies\_by\_id\_enabled) | List of existing policy IDs to attach to all Spacelift stacks. These policies must already exist in Spacelift | `list(string)` | `[]` | no | +| [policies\_by\_name\_enabled](#input\_policies\_by\_name\_enabled) | List of existing policy names to attach to all Spacelift stacks. These policies must exist at `modules/spacelift/rego-policies` OR `var.policies_by_name_path`. | `list(string)` | `[]` | no | +| [policies\_by\_name\_path](#input\_policies\_by\_name\_path) | Path to the catalog of external Rego policies. The Rego files must exist in the caller's code at the path. The module will create Spacelift policies from the external Rego definitions | `string` | `""` | no | +| [policies\_enabled](#input\_policies\_enabled) | DEPRECATED: Use `policies_by_id_enabled` instead. List of default policies created by this stack to attach to all Spacelift stacks | `list(string)` | `[]` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [runner\_image](#input\_runner\_image) | Full address & tag of the Spacelift runner image (e.g. on ECR) | `string` | n/a | yes | +| [spacelift\_api\_endpoint](#input\_spacelift\_api\_endpoint) | The Spacelift API endpoint URL (e.g. https://example.app.spacelift.io) | `string` | n/a | yes | +| [spacelift\_component\_path](#input\_spacelift\_component\_path) | The Spacelift Component Path | `string` | `"components/terraform"` | no | +| [spacelift\_run\_enabled](#input\_spacelift\_run\_enabled) | Enable/disable creation of the `spacelift_run` resource | `bool` | `false` | no | +| [spacelift\_stack\_dependency\_enabled](#input\_spacelift\_stack\_dependency\_enabled) | If enabled, the `spacelift_stack_dependency` Spacelift resource will be used to create dependencies between stacks instead of using the `depends-on` labels. The `depends-on` labels will be removed from the stacks and the trigger policies for dependencies will be detached | `bool` | `false` | no | +| [stack\_config\_path\_template](#input\_stack\_config\_path\_template) | Stack config path template | `string` | `"stacks/%s.yaml"` | no | +| [stack\_destructor\_enabled](#input\_stack\_destructor\_enabled) | Flag to enable/disable the stack destructor to destroy the resources of a stack before deleting the stack itself | `bool` | `false` | no | +| [stacks\_space\_id](#input\_stacks\_space\_id) | Override the space ID for all stacks (unless the stack config has `dedicated_space` set to true). Otherwise, it will default to the admin stack's space. | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tag\_filters](#input\_tag\_filters) | A map of tags that will filter stack creation by the matching `tags` set in a component `vars` configuration. | `map(string)` | `{}` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [terraform\_version](#input\_terraform\_version) | Default Terraform version for all stacks created by this project | `string` | n/a | yes | +| [terraform\_version\_map](#input\_terraform\_version\_map) | A map to determine which Terraform patch version to use for each minor version | `map(string)` | `{}` | no | +| [worker\_pool\_name\_id\_map](#input\_worker\_pool\_name\_id\_map) | Map of worker pool names to worker pool IDs | `map(any)` | `{}` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [stacks](#output\_stacks) | Spacelift stacks | + + +## References + +* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/spacelift) - Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/tgw/cross-region-spoke/modules/vpc_routes/context.tf b/deprecated/spacelift/context.tf similarity index 100% rename from modules/tgw/cross-region-spoke/modules/vpc_routes/context.tf rename to deprecated/spacelift/context.tf diff --git a/modules/spacelift/docs/example-spacelift-config.yml b/deprecated/spacelift/docs/example-spacelift-config.yml similarity index 100% rename from modules/spacelift/docs/example-spacelift-config.yml rename to deprecated/spacelift/docs/example-spacelift-config.yml diff --git a/modules/spacelift/docs/example-stacks_catalog_spacelift.yaml b/deprecated/spacelift/docs/example-stacks_catalog_spacelift.yaml similarity index 100% rename from modules/spacelift/docs/example-stacks_catalog_spacelift.yaml rename to deprecated/spacelift/docs/example-stacks_catalog_spacelift.yaml diff --git a/modules/spacelift/docs/example-stacks_deployed_spacelift.yaml b/deprecated/spacelift/docs/example-stacks_deployed_spacelift.yaml similarity index 100% rename from modules/spacelift/docs/example-stacks_deployed_spacelift.yaml rename to deprecated/spacelift/docs/example-stacks_deployed_spacelift.yaml diff --git a/modules/spacelift/docs/img/Spacelift-Infrastructure-Behavior.png b/deprecated/spacelift/docs/img/Spacelift-Infrastructure-Behavior.png similarity index 100% rename from modules/spacelift/docs/img/Spacelift-Infrastructure-Behavior.png rename to deprecated/spacelift/docs/img/Spacelift-Infrastructure-Behavior.png diff --git a/modules/spacelift/docs/img/Spacelift-Merge-Execution.png b/deprecated/spacelift/docs/img/Spacelift-Merge-Execution.png similarity index 100% rename from modules/spacelift/docs/img/Spacelift-Merge-Execution.png rename to deprecated/spacelift/docs/img/Spacelift-Merge-Execution.png diff --git a/modules/spacelift/docs/img/Spacelift-PR-Changes.png b/deprecated/spacelift/docs/img/Spacelift-PR-Changes.png similarity index 100% rename from modules/spacelift/docs/img/Spacelift-PR-Changes.png rename to deprecated/spacelift/docs/img/Spacelift-PR-Changes.png diff --git a/modules/spacelift/docs/img/Spacelift-PR-Checks.png b/deprecated/spacelift/docs/img/Spacelift-PR-Checks.png similarity index 100% rename from modules/spacelift/docs/img/Spacelift-PR-Checks.png rename to deprecated/spacelift/docs/img/Spacelift-PR-Checks.png diff --git a/modules/spacelift/docs/spacectl.md b/deprecated/spacelift/docs/spacectl.md similarity index 100% rename from modules/spacelift/docs/spacectl.md rename to 
deprecated/spacelift/docs/spacectl.md diff --git a/modules/spacelift/docs/spacelift-overview.md b/deprecated/spacelift/docs/spacelift-overview.md similarity index 98% rename from modules/spacelift/docs/spacelift-overview.md rename to deprecated/spacelift/docs/spacelift-overview.md index b232276de..5671e4719 100644 --- a/modules/spacelift/docs/spacelift-overview.md +++ b/deprecated/spacelift/docs/spacelift-overview.md @@ -6,7 +6,7 @@ large-scale installations - dozens of teams, hundreds of engineers and tens of t ## Projects & Configuration -There are two projects located in this repository that are required for the deplyoment & day-to-day operation of +There are two projects located in this repository that are required for the deployment & day-to-day operation of Spacelift. | Project | Description | diff --git a/modules/spacelift/main.tf b/deprecated/spacelift/main.tf similarity index 77% rename from modules/spacelift/main.tf rename to deprecated/spacelift/main.tf index 69d7c32b5..4320947a3 100644 --- a/modules/spacelift/main.tf +++ b/deprecated/spacelift/main.tf @@ -1,14 +1,16 @@ -provider "spacelift" {} - module "spacelift" { source = "cloudposse/cloud-infrastructure-automation/spacelift" - version = "0.49.5" + version = "0.55.0" context_filters = var.context_filters + tag_filters = var.tag_filters stack_config_path_template = var.stack_config_path_template components_path = var.spacelift_component_path + stacks_space_id = var.stacks_space_id + attachment_space_id = var.attachment_space_id + branch = var.git_branch repository = var.git_repository commit_sha = var.git_commit_sha @@ -21,16 +23,18 @@ module "spacelift" { terraform_version = var.terraform_version terraform_version_map = var.terraform_version_map - imports_processing_enabled = false - stack_deps_processing_enabled = false - component_deps_processing_enabled = true + imports_processing_enabled = false + stack_deps_processing_enabled = false + component_deps_processing_enabled = true + spacelift_stack_dependency_enabled = var.spacelift_stack_dependency_enabled policies_available = var.policies_available policies_enabled = var.policies_enabled policies_by_id_enabled = var.policies_by_id_enabled policies_by_name_enabled = var.policies_by_name_enabled - policies_by_name_path = format("%s/rego-policies", path.module) + policies_by_name_path = var.policies_by_name_path != "" ? var.policies_by_name_path : format("%s/rego-policies", path.module) + administrative_push_policy_enabled = var.administrative_push_policy_enabled administrative_trigger_policy_enabled = var.administrative_trigger_policy_enabled administrative_stack_drift_detection_enabled = var.administrative_stack_drift_detection_enabled diff --git a/modules/spacelift/outputs.tf b/deprecated/spacelift/outputs.tf similarity index 100% rename from modules/spacelift/outputs.tf rename to deprecated/spacelift/outputs.tf diff --git a/deprecated/spacelift/providers.tf b/deprecated/spacelift/providers.tf new file mode 100644 index 000000000..54257fd20 --- /dev/null +++ b/deprecated/spacelift/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
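+    # compact() drops a null ARN, leaving an empty list, so no assume_role block is rendered when the role is not set.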
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = module.iam_roles.terraform_role_arn + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/deprecated/spacelift/rego-policies/access.default.rego b/deprecated/spacelift/rego-policies/access.default.rego new file mode 100644 index 000000000..6945b3a41 --- /dev/null +++ b/deprecated/spacelift/rego-policies/access.default.rego @@ -0,0 +1,9 @@ +package spacelift + +# Access Policy Documentation: +# https://docs.spacelift.io/concepts/policy/stack-access-policy + +# By default, allow READ access to everybody who has permissions to login to Spacelift +read { + true +} diff --git a/deprecated/spacelift/rego-policies/plan.autodeployupdates.rego b/deprecated/spacelift/rego-policies/plan.autodeployupdates.rego new file mode 100644 index 000000000..43a78ceba --- /dev/null +++ b/deprecated/spacelift/rego-policies/plan.autodeployupdates.rego @@ -0,0 +1,21 @@ +package spacelift + +# This policy allows autodeploy if there are only new resources or updates. +# It requires manual intervention (approval) if any of the resources will be deleted. + +# Usage: +# settings: +# spacelift: +# autodeploy: true +# policies_by_name_enabled: +# - plan.autodeployupdates + +warn[sprintf(message, [action, resource.address])] { + message := "action '%s' requires human review (%s)" + review := {"delete"} + + resource := input.terraform.resource_changes[_] + action := resource.change.actions[_] + + review[action] +} diff --git a/deprecated/spacelift/rego-policies/plan.ecr.rego b/deprecated/spacelift/rego-policies/plan.ecr.rego new file mode 100644 index 000000000..23f4ef8d4 --- /dev/null +++ b/deprecated/spacelift/rego-policies/plan.ecr.rego @@ -0,0 +1,16 @@ +package spacelift + +proposed := input.spacelift.run.type == "PROPOSED" + +deny[reason] { not proposed; reason := resource_deletion[_] } +warn[reason] { proposed; reason := resource_deletion[_] } + +resource_deletion[sprintf(message, [action, resource.address])] { + message := "action '%s' requires human review (%s)" + review := {"delete"} + types := {"aws_ecr_repository"} + resource := input.terraform.resource_changes[_] + action := resource.change.actions[_] + review[action] + types[resource.type] +} diff --git a/deprecated/spacelift/spacelift-provider.tf b/deprecated/spacelift/spacelift-provider.tf new file mode 100644 index 000000000..dc5400223 --- /dev/null +++ b/deprecated/spacelift/spacelift-provider.tf @@ -0,0 +1,26 @@ +variable "spacelift_api_endpoint" { + type = string + description = "The Spacelift API endpoint URL (e.g. https://example.app.spacelift.io)" +} + +# The Spacelift always validates its credentials, so we always pass api_key_id and api_key_secret +data "aws_ssm_parameter" "spacelift_key_id" { + count = local.enabled ? 1 : 0 + name = "/spacelift/key_id" +} + +data "aws_ssm_parameter" "spacelift_key_secret" { + count = local.enabled ? 1 : 0 + name = "/spacelift/key_secret" +} + +locals { + enabled = module.this.enabled +} + +# This provider always validates its credentials, so we always pass api_key_id and api_key_secret +provider "spacelift" { + api_key_endpoint = var.spacelift_api_endpoint + api_key_id = local.enabled ? data.aws_ssm_parameter.spacelift_key_id[0].value : null + api_key_secret = local.enabled ? 
data.aws_ssm_parameter.spacelift_key_secret[0].value : null +} diff --git a/modules/spacelift/variables.tf b/deprecated/spacelift/variables.tf similarity index 80% rename from modules/spacelift/variables.tf rename to deprecated/spacelift/variables.tf index cfbb4f020..4b878ac84 100644 --- a/modules/spacelift/variables.tf +++ b/deprecated/spacelift/variables.tf @@ -8,12 +8,6 @@ variable "runner_image" { description = "Full address & tag of the Spacelift runner image (e.g. on ECR)" } -variable "worker_pool_id" { - type = string - description = "DEPRECATED: Use worker_pool_name_id_map instead. Worker pool ID" - default = "" -} - variable "worker_pool_name_id_map" { type = map(any) description = "Map of worker pool names to worker pool IDs" @@ -97,10 +91,16 @@ variable "policies_by_id_enabled" { variable "policies_by_name_enabled" { type = list(string) - description = "List of existing policy names to attach to all Spacelift stacks. These policies must exist in `modules/spacelift/rego-policies`" + description = "List of existing policy names to attach to all Spacelift stacks. These policies must exist at `modules/spacelift/rego-policies` OR `var.policies_by_name_path`." default = [] } +variable "policies_by_name_path" { + type = string + description = "Path to the catalog of external Rego policies. The Rego files must exist in the caller's code at the path. The module will create Spacelift policies from the external Rego definitions" + default = "" +} + variable "administrative_stack_drift_detection_enabled" { type = bool description = "Flag to enable/disable administrative stack drift detection" @@ -173,6 +173,18 @@ variable "context_filters" { default = {} } +variable "tag_filters" { + type = map(string) + description = "A map of tags that will filter stack creation by the matching `tags` set in a component `vars` configuration." + default = {} +} + +variable "administrative_push_policy_enabled" { + type = bool + description = "Flag to enable/disable the global administrative push policy" + default = true +} + variable "administrative_trigger_policy_enabled" { type = bool description = "Flag to enable/disable the global administrative trigger policy" @@ -196,3 +208,21 @@ variable "before_init" { description = "List of before-init scripts" default = [] } + +variable "attachment_space_id" { + type = string + description = "Specify the space ID for attachments (e.g. policies, contexts, etc.)" + default = "legacy" +} + +variable "stacks_space_id" { + type = string + description = "Override the space ID for all stacks (unless the stack config has `dedicated_space` set to true). Otherwise, it will default to the admin stack's space." + default = null +} + +variable "spacelift_stack_dependency_enabled" { + type = bool + description = "If enabled, the `spacelift_stack_dependency` Spacelift resource will be used to create dependencies between stacks instead of using the `depends-on` labels. 
The `depends-on` labels will be removed from the stacks and the trigger policies for dependencies will be detached" + default = false +} diff --git a/deprecated/spacelift/versions.tf b/deprecated/spacelift/versions.tf new file mode 100644 index 000000000..1174cd191 --- /dev/null +++ b/deprecated/spacelift/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.3" + + required_providers { + spacelift = { + source = "spacelift-io/spacelift" + version = ">= 0.1.31" + } + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + } +} diff --git a/modules/sso/README.md b/deprecated/sso/README.md similarity index 98% rename from modules/sso/README.md rename to deprecated/sso/README.md index 6627e2045..86ac7e2c8 100644 --- a/modules/sso/README.md +++ b/deprecated/sso/README.md @@ -71,7 +71,6 @@ components: | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | -| [root\_account\_tenant\_name](#input\_root\_account\_tenant\_name) | The tenant name for the root account | `string` | `null` | no | | [saml\_providers](#input\_saml\_providers) | Map of provider names to XML data filenames | `map(string)` | n/a | yes | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | diff --git a/deprecated/sso/context.tf b/deprecated/sso/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/deprecated/sso/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? 
true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. 
+ EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/sso/main.tf b/deprecated/sso/main.tf similarity index 100% rename from modules/sso/main.tf rename to deprecated/sso/main.tf diff --git a/deprecated/sso/modules/okta-user/context.tf b/deprecated/sso/modules/okta-user/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/deprecated/sso/modules/okta-user/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. 
+ You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+  EOT
+}
+
+#### End of copy of cloudposse/terraform-null-label/variables.tf
diff --git a/modules/sso/modules/okta-user/main.tf b/deprecated/sso/modules/okta-user/main.tf
similarity index 100%
rename from modules/sso/modules/okta-user/main.tf
rename to deprecated/sso/modules/okta-user/main.tf
diff --git a/modules/sso/modules/okta-user/outputs.tf b/deprecated/sso/modules/okta-user/outputs.tf
similarity index 100%
rename from modules/sso/modules/okta-user/outputs.tf
rename to deprecated/sso/modules/okta-user/outputs.tf
diff --git a/modules/sso/modules/okta-user/variables.tf b/deprecated/sso/modules/okta-user/variables.tf
similarity index 100%
rename from modules/sso/modules/okta-user/variables.tf
rename to deprecated/sso/modules/okta-user/variables.tf
diff --git a/modules/sso/outputs.tf b/deprecated/sso/outputs.tf
similarity index 100%
rename from modules/sso/outputs.tf
rename to deprecated/sso/outputs.tf
diff --git a/modules/sso/providers.tf b/deprecated/sso/providers.tf
similarity index 69%
rename from modules/sso/providers.tf
rename to deprecated/sso/providers.tf
index 54f0d0f04..c4f45ca75 100644
--- a/modules/sso/providers.tf
+++ b/deprecated/sso/providers.tf
@@ -12,10 +12,9 @@ provider "aws" {
 }
 
 module "iam_roles" {
-  source             = "../account-map/modules/iam-roles"
-  privileged         = true
-  global_tenant_name = var.root_account_tenant_name
-  context            = module.this.context
+  source     = "../account-map/modules/iam-roles"
+  privileged = true
+  context    = module.this.context
 }
 
 variable "import_profile_name" {
@@ -29,9 +28,3 @@ variable "import_role_arn" {
   default     = null
   description = "IAM Role ARN to use when importing a resource"
 }
-
-variable "root_account_tenant_name" {
-  type        = string
-  description = "The tenant name for the root account"
-  default     = null
-}
diff --git a/modules/sso/variables.tf b/deprecated/sso/variables.tf
similarity index 100%
rename from modules/sso/variables.tf
rename to deprecated/sso/variables.tf
diff --git a/modules/sso/versions.tf b/deprecated/sso/versions.tf
similarity index 100%
rename from modules/sso/versions.tf
rename to deprecated/sso/versions.tf
diff --git a/deprecated/tgw/cross-region-hub-connector/README.md b/deprecated/tgw/cross-region-hub-connector/README.md
new file mode 100644
index 000000000..e80e18c14
--- /dev/null
+++ b/deprecated/tgw/cross-region-hub-connector/README.md
@@ -0,0 +1,109 @@
+# Component: `cross-region-hub-connector`
+
+This component is responsible for provisioning an [AWS Transit Gateway Peering Connection](https://aws.amazon.com/transit-gateway) to connect TGWs from different accounts and/or regions.
+
+## Usage
+
+**Stack Level**: Regional
+
+This component is deployed to each off-region `tgw/hub`. For example, if your home region is `region-a` and you have
+created a `tgw/hub` in both `region-a` and `region-b`, deploy this component to `region-b` to peer the two transit
+gateways.
+
+This can be done by setting up a catalog entry that points to the home region and simply importing it.
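+
+Under the hood, this component requests a Transit Gateway peering attachment from this (off-home) region and accepts
+it in the home region. The following is a simplified sketch of that pattern; the `enabled` guard, tags, and the
+route-table associations the component also manages are omitted:
+
+```hcl
+# Requester side: runs against this region's transit gateway
+resource "aws_ec2_transit_gateway_peering_attachment" "tgw_peering" {
+  provider                = aws.tgw_this_region
+  peer_account_id         = module.account_map.outputs.full_account_map[format(var.home_region.tgw_name_format, var.home_region.tgw_tenant_name, var.home_region.tgw_stage_name)]
+  peer_region             = var.home_region.region
+  peer_transit_gateway_id = module.tgw_home_region.outputs.transit_gateway_id
+  transit_gateway_id      = module.tgw_this_region.outputs.transit_gateway_id
+}
+
+# Accepter side: runs against the home region's transit gateway
+resource "aws_ec2_transit_gateway_peering_attachment_accepter" "tgw_peering_accepter" {
+  provider                      = aws.tgw_home_region
+  transit_gateway_attachment_id = aws_ec2_transit_gateway_peering_attachment.tgw_peering.id
+}
+```
+
+The home-region account is resolved from `account-map` by formatting `tgw_name_format` with `tgw_tenant_name` and
+`tgw_stage_name`; for example, `"%s-%s"` with `core` and `network` yields the lookup key `core-network`.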
+ +```yaml +components: + terraform: + tgw/cross-region-hub-connector: + vars: + enabled: true + account_map_tenant_name: core + this_region: + tgw_stage_name: network + tgw_tenant_name: core + home_region: + tgw_name_format: "%s-%s" + tgw_stage_name: network + tgw_tenant_name: core + environment: region-a #short or fixed notation + region: region-a +``` + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws.tgw\_home\_region](#provider\_aws.tgw\_home\_region) | >= 4.0 | +| [aws.tgw\_this\_region](#provider\_aws.tgw\_this\_region) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | +| [iam\_role\_tgw\_home\_region](#module\_iam\_role\_tgw\_home\_region) | ../../account-map/modules/iam-roles | n/a | +| [iam\_role\_tgw\_this\_region](#module\_iam\_role\_tgw\_this\_region) | ../../account-map/modules/iam-roles | n/a | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [tgw\_home\_region](#module\_tgw\_home\_region) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | +| [tgw\_this\_region](#module\_tgw\_this\_region) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_ec2_transit_gateway_peering_attachment.tgw_peering](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_transit_gateway_peering_attachment) | resource | +| [aws_ec2_transit_gateway_peering_attachment_accepter.tgw_peering_accepter](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_transit_gateway_peering_attachment_accepter) | resource | +| [aws_ec2_transit_gateway_route_table_association.tgw_rt_associate_peering_cross_region](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_transit_gateway_route_table_association) | resource | +| [aws_ec2_transit_gateway_route_table_association.tgw_rt_associate_peering_in_region](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_transit_gateway_route_table_association) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [account\_map\_tenant\_name](#input\_account\_map\_tenant\_name) | The name of the tenant where `account_map` is provisioned.

If the `tenant` label is not used, leave this as `null`. | `string` | `null` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [home\_region](#input\_home\_region) | Acceptors region config. Describe the transit gateway that should accept the peering |
object({
tgw_name_format = string
tgw_stage_name = string
tgw_tenant_name = string
region = string
environment = string
})
| n/a | yes | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | +| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [this\_region](#input\_this\_region) | Initiators region config. Describe the transit gateway that should originate the peering |
object({
tgw_stage_name = string
tgw_tenant_name = string
})
| n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [aws\_ec2\_transit\_gateway\_peering\_attachment\_id](#output\_aws\_ec2\_transit\_gateway\_peering\_attachment\_id) | Transit Gateway Peering Attachment ID | + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/tgw/cross-region-hub-connector) - Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/deprecated/tgw/cross-region-hub-connector/context.tf b/deprecated/tgw/cross-region-hub-connector/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/deprecated/tgw/cross-region-hub-connector/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. 
+ Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. 
+ This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/deprecated/tgw/cross-region-hub-connector/main.tf b/deprecated/tgw/cross-region-hub-connector/main.tf new file mode 100644 index 000000000..614418980 --- /dev/null +++ b/deprecated/tgw/cross-region-hub-connector/main.tf @@ -0,0 +1,40 @@ +locals { + enabled = module.this.enabled +} + +# connects two transit gateways that are cross region +resource "aws_ec2_transit_gateway_peering_attachment" "tgw_peering" { + count = local.enabled ? 1 : 0 + provider = aws.tgw_this_region + peer_account_id = module.account_map.outputs.full_account_map[format(var.home_region.tgw_name_format, var.home_region.tgw_tenant_name, var.home_region.tgw_stage_name)] + peer_region = var.home_region.region + peer_transit_gateway_id = module.tgw_home_region.outputs.transit_gateway_id + transit_gateway_id = module.tgw_this_region.outputs.transit_gateway_id + tags = module.this.tags +} + +# accepts the above +resource "aws_ec2_transit_gateway_peering_attachment_accepter" "tgw_peering_accepter" { + count = local.enabled ? 1 : 0 + provider = aws.tgw_home_region + transit_gateway_attachment_id = join("", aws_ec2_transit_gateway_peering_attachment.tgw_peering.*.id) + tags = module.this.tags +} + +resource "aws_ec2_transit_gateway_route_table_association" "tgw_rt_associate_peering_in_region" { + count = local.enabled ? 1 : 0 + depends_on = [aws_ec2_transit_gateway_peering_attachment_accepter.tgw_peering_accepter] + + provider = aws.tgw_this_region + transit_gateway_attachment_id = join("", aws_ec2_transit_gateway_peering_attachment.tgw_peering.*.id) + transit_gateway_route_table_id = module.tgw_this_region.outputs.transit_gateway_route_table_id +} + +resource "aws_ec2_transit_gateway_route_table_association" "tgw_rt_associate_peering_cross_region" { + count = local.enabled ? 1 : 0 + depends_on = [aws_ec2_transit_gateway_peering_attachment_accepter.tgw_peering_accepter] + + provider = aws.tgw_home_region + transit_gateway_attachment_id = join("", aws_ec2_transit_gateway_peering_attachment.tgw_peering.*.id) + transit_gateway_route_table_id = module.tgw_home_region.outputs.transit_gateway_route_table_id +} diff --git a/deprecated/tgw/cross-region-hub-connector/outputs.tf b/deprecated/tgw/cross-region-hub-connector/outputs.tf new file mode 100644 index 000000000..8658de93d --- /dev/null +++ b/deprecated/tgw/cross-region-hub-connector/outputs.tf @@ -0,0 +1,4 @@ +output "aws_ec2_transit_gateway_peering_attachment_id" { + value = join("", aws_ec2_transit_gateway_peering_attachment.tgw_peering.*.id) + description = "Transit Gateway Peering Attachment ID" +} diff --git a/deprecated/tgw/cross-region-hub-connector/providers.tf b/deprecated/tgw/cross-region-hub-connector/providers.tf new file mode 100644 index 000000000..058478e11 --- /dev/null +++ b/deprecated/tgw/cross-region-hub-connector/providers.tf @@ -0,0 +1,62 @@ +# Assuming region-a is default. +# tgw_this_region is network of region-b +# tgw_home_reigon is network of region-a + +provider "aws" { + alias = "tgw_this_region" + region = var.region + + profile = module.iam_role_tgw_this_region.profiles_enabled ? coalesce(var.import_profile_name, module.iam_role_tgw_this_region.terraform_profile_name) : null + dynamic "assume_role" { + for_each = module.iam_role_tgw_this_region.profiles_enabled ? 
[] : ["role"] + content { + role_arn = coalesce(var.import_role_arn, module.iam_role_tgw_this_region.terraform_role_arn) + } + } +} + +provider "aws" { + alias = "tgw_home_region" + region = var.home_region.region + + profile = module.iam_role_tgw_home_region.profiles_enabled ? coalesce(var.import_profile_name, module.iam_role_tgw_home_region.terraform_profile_name) : null + dynamic "assume_role" { + for_each = module.iam_role_tgw_home_region.profiles_enabled ? [] : ["role"] + content { + role_arn = coalesce(var.import_role_arn, module.iam_role_tgw_home_region.terraform_role_arn) + } + } +} + +module "iam_role_tgw_this_region" { + source = "../../account-map/modules/iam-roles" + stage = var.this_region.tgw_stage_name + tenant = var.this_region.tgw_tenant_name + context = module.this.context +} + +module "iam_role_tgw_home_region" { + source = "../../account-map/modules/iam-roles" + stage = var.home_region.tgw_stage_name + tenant = var.home_region.tgw_tenant_name + environment = var.home_region.environment + context = module.this.context +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + global_tenant_name = var.account_map_tenant_name + context = module.this.context +} + +variable "import_profile_name" { + type = string + default = null + description = "AWS Profile name to use when importing a resource" +} + +variable "import_role_arn" { + type = string + default = null + description = "IAM Role ARN to use when importing a resource" +} diff --git a/deprecated/tgw/cross-region-hub-connector/remote-state.tf b/deprecated/tgw/cross-region-hub-connector/remote-state.tf new file mode 100644 index 000000000..9f3339c58 --- /dev/null +++ b/deprecated/tgw/cross-region-hub-connector/remote-state.tf @@ -0,0 +1,31 @@ +module "account_map" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.4.1" + + component = "account-map" + stage = "root" + environment = "gbl" + tenant = var.account_map_tenant_name + context = module.this.context +} + +module "tgw_this_region" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.4.1" + + component = "tgw/hub" + stage = var.this_region["tgw_stage_name"] + tenant = var.this_region["tgw_tenant_name"] + context = module.this.context +} + +module "tgw_home_region" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.4.1" + + component = "tgw/hub" + stage = var.home_region["tgw_stage_name"] + environment = var.home_region["environment"] + tenant = var.home_region["tgw_tenant_name"] + context = module.this.context +} diff --git a/deprecated/tgw/cross-region-hub-connector/variables.tf b/deprecated/tgw/cross-region-hub-connector/variables.tf new file mode 100644 index 000000000..753ad5f74 --- /dev/null +++ b/deprecated/tgw/cross-region-hub-connector/variables.tf @@ -0,0 +1,33 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "this_region" { + type = object({ + tgw_stage_name = string + tgw_tenant_name = string + }) + description = "Initiators region config. Describe the transit gateway that should originate the peering" +} + +variable "home_region" { + type = object({ + tgw_name_format = string + tgw_stage_name = string + tgw_tenant_name = string + region = string + environment = string + }) + description = "Acceptors region config. Describe the transit gateway that should accept the peering" +} + +variable "account_map_tenant_name" { + type = string + description = <<-EOT + The name of the tenant where `account_map` is provisioned. 
+ + If the `tenant` label is not used, leave this as `null`. + EOT + default = null +} diff --git a/modules/eks/efs/versions.tf b/deprecated/tgw/cross-region-hub-connector/versions.tf similarity index 83% rename from modules/eks/efs/versions.tf rename to deprecated/tgw/cross-region-hub-connector/versions.tf index e89eb16ed..f33ede77f 100644 --- a/modules/eks/efs/versions.tf +++ b/deprecated/tgw/cross-region-hub-connector/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } } } diff --git a/modules/tgw/cross-region-spoke/README.md b/deprecated/tgw/cross-region-spoke/README.md similarity index 96% rename from modules/tgw/cross-region-spoke/README.md rename to deprecated/tgw/cross-region-spoke/README.md index 7860bf2c1..6c7803eed 100644 --- a/modules/tgw/cross-region-spoke/README.md +++ b/deprecated/tgw/cross-region-spoke/README.md @@ -44,7 +44,7 @@ components: | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | ## Providers @@ -54,21 +54,21 @@ No providers. | Name | Source | Version | |------|--------|---------| -| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | | [az\_abbreviation](#module\_az\_abbreviation) | cloudposse/utils/aws | 1.0.0 | | [iam\_role\_tgw\_home\_region](#module\_iam\_role\_tgw\_home\_region) | ../../account-map/modules/iam-roles | n/a | | [iam\_role\_tgw\_this\_region](#module\_iam\_role\_tgw\_this\_region) | ../../account-map/modules/iam-roles | n/a | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | -| [tgw\_cross\_region\_connector](#module\_tgw\_cross\_region\_connector) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | -| [tgw\_home\_region](#module\_tgw\_home\_region) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [tgw\_cross\_region\_connector](#module\_tgw\_cross\_region\_connector) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | +| [tgw\_home\_region](#module\_tgw\_home\_region) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | | [tgw\_routes\_home\_region](#module\_tgw\_routes\_home\_region) | ./modules/tgw_routes | n/a | | [tgw\_routes\_this\_region](#module\_tgw\_routes\_this\_region) | ./modules/tgw_routes | n/a | -| [tgw\_this\_region](#module\_tgw\_this\_region) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [tgw\_this\_region](#module\_tgw\_this\_region) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | | [vpc\_routes\_home](#module\_vpc\_routes\_home) | ./modules/vpc_routes | n/a | | [vpc\_routes\_this](#module\_vpc\_routes\_this) | ./modules/vpc_routes | n/a | -| [vpcs\_home\_region](#module\_vpcs\_home\_region) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | -| [vpcs\_this\_region](#module\_vpcs\_this\_region) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [vpcs\_home\_region](#module\_vpcs\_home\_region) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | +| [vpcs\_this\_region](#module\_vpcs\_this\_region) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | ## Resources @@ -108,10 +108,10 @@ No resources. 
| Name | Description | |------|-------------| -| [tgw\_routes\_home\_region](#output\_tgw\_routes\_home\_region) | n/a | -| [tgw\_routes\_in\_region](#output\_tgw\_routes\_in\_region) | n/a | -| [vpc\_routes\_home](#output\_vpc\_routes\_home) | n/a | -| [vpc\_routes\_this](#output\_vpc\_routes\_this) | n/a | +| [tgw\_routes\_home\_region](#output\_tgw\_routes\_home\_region) | TGW Routes to the primary region | +| [tgw\_routes\_in\_region](#output\_tgw\_routes\_in\_region) | TGW reoutes in this region | +| [vpc\_routes\_home](#output\_vpc\_routes\_home) | VPC routes to the primary VPC | +| [vpc\_routes\_this](#output\_vpc\_routes\_this) | This modules VPC routes | ## References diff --git a/deprecated/tgw/cross-region-spoke/context.tf b/deprecated/tgw/cross-region-spoke/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/deprecated/tgw/cross-region-spoke/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. 
+ Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. 
+ This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/tgw/cross-region-spoke/main.tf b/deprecated/tgw/cross-region-spoke/main.tf similarity index 100% rename from modules/tgw/cross-region-spoke/main.tf rename to deprecated/tgw/cross-region-spoke/main.tf diff --git a/deprecated/tgw/cross-region-spoke/modules/tgw_routes/context.tf b/deprecated/tgw/cross-region-spoke/modules/tgw_routes/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/deprecated/tgw/cross-region-spoke/modules/tgw_routes/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? 
true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. 
+ Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/tgw/cross-region-spoke/modules/tgw_routes/main.tf b/deprecated/tgw/cross-region-spoke/modules/tgw_routes/main.tf similarity index 100% rename from modules/tgw/cross-region-spoke/modules/tgw_routes/main.tf rename to deprecated/tgw/cross-region-spoke/modules/tgw_routes/main.tf diff --git a/deprecated/tgw/cross-region-spoke/modules/tgw_routes/outputs.tf b/deprecated/tgw/cross-region-spoke/modules/tgw_routes/outputs.tf new file mode 100644 index 000000000..ba6b38b14 --- /dev/null +++ b/deprecated/tgw/cross-region-spoke/modules/tgw_routes/outputs.tf @@ -0,0 +1,4 @@ +output "aws_ec2_transit_gateway_routes" { + value = aws_ec2_transit_gateway_route.default + description = "AWS EC2 Transit Gateway routes" +} diff --git a/modules/tgw/cross-region-spoke/modules/tgw_routes/variables.tf b/deprecated/tgw/cross-region-spoke/modules/tgw_routes/variables.tf similarity index 100% rename from modules/tgw/cross-region-spoke/modules/tgw_routes/variables.tf rename to deprecated/tgw/cross-region-spoke/modules/tgw_routes/variables.tf diff --git a/modules/sqs-queue/modules/terraform-aws-sqs-queue/versions.tf b/deprecated/tgw/cross-region-spoke/modules/tgw_routes/versions.tf similarity index 83% rename from modules/sqs-queue/modules/terraform-aws-sqs-queue/versions.tf rename to deprecated/tgw/cross-region-spoke/modules/tgw_routes/versions.tf index e89eb16ed..f33ede77f 100644 --- a/modules/sqs-queue/modules/terraform-aws-sqs-queue/versions.tf +++ b/deprecated/tgw/cross-region-spoke/modules/tgw_routes/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } } } diff --git a/deprecated/tgw/cross-region-spoke/modules/vpc_routes/context.tf b/deprecated/tgw/cross-region-spoke/modules/vpc_routes/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/deprecated/tgw/cross-region-spoke/modules/vpc_routes/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/tgw/cross-region-spoke/modules/vpc_routes/main.tf b/deprecated/tgw/cross-region-spoke/modules/vpc_routes/main.tf similarity index 100% rename from modules/tgw/cross-region-spoke/modules/vpc_routes/main.tf rename to deprecated/tgw/cross-region-spoke/modules/vpc_routes/main.tf diff --git a/deprecated/tgw/cross-region-spoke/modules/vpc_routes/outputs.tf b/deprecated/tgw/cross-region-spoke/modules/vpc_routes/outputs.tf new file mode 100644 index 000000000..e706a95fe --- /dev/null +++ b/deprecated/tgw/cross-region-spoke/modules/vpc_routes/outputs.tf @@ -0,0 +1,4 @@ +output "aws_routes" { + value = aws_route.route + description = "AWS Routes" +} diff --git a/modules/tgw/cross-region-spoke/modules/vpc_routes/variables.tf b/deprecated/tgw/cross-region-spoke/modules/vpc_routes/variables.tf similarity index 100% rename from modules/tgw/cross-region-spoke/modules/vpc_routes/variables.tf rename to deprecated/tgw/cross-region-spoke/modules/vpc_routes/variables.tf diff --git a/modules/tgw/cross-region-spoke/modules/tgw_routes/versions.tf b/deprecated/tgw/cross-region-spoke/modules/vpc_routes/versions.tf similarity index 83% rename from modules/tgw/cross-region-spoke/modules/tgw_routes/versions.tf rename to deprecated/tgw/cross-region-spoke/modules/vpc_routes/versions.tf index e89eb16ed..f33ede77f 100644 --- a/modules/tgw/cross-region-spoke/modules/tgw_routes/versions.tf +++ b/deprecated/tgw/cross-region-spoke/modules/vpc_routes/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } } } diff --git a/deprecated/tgw/cross-region-spoke/outputs.tf b/deprecated/tgw/cross-region-spoke/outputs.tf new file mode 100644 index 000000000..1bcf17612 --- /dev/null +++ b/deprecated/tgw/cross-region-spoke/outputs.tf @@ -0,0 +1,27 @@ +output "vpc_routes_this" { + value = module.vpc_routes_this + description = "This module's VPC routes" +} + +output "tgw_routes_in_region" { + value = module.tgw_routes_this_region + description = "TGW routes in this region" +} +
+output "vpc_routes_home" { + value = module.vpc_routes_home + description = "VPC routes to the primary VPC" +} + +output "tgw_routes_home_region" { + value = module.tgw_routes_home_region + description = "TGW Routes to the primary region" +} +# +#output "tgw_this_region" { +# value = module.tgw_this_region +#} +# +#output "vpcs_this_region" { +# value = module.vpcs_this_region +#} diff --git a/modules/tgw/cross-region-spoke/providers.tf b/deprecated/tgw/cross-region-spoke/providers.tf similarity index 100% rename from modules/tgw/cross-region-spoke/providers.tf rename to deprecated/tgw/cross-region-spoke/providers.tf diff --git a/modules/tgw/cross-region-spoke/remote-state.tf b/deprecated/tgw/cross-region-spoke/remote-state.tf similarity index 94% rename from modules/tgw/cross-region-spoke/remote-state.tf rename to deprecated/tgw/cross-region-spoke/remote-state.tf index aaa246847..de7ab6a31 100644 --- a/modules/tgw/cross-region-spoke/remote-state.tf +++ b/deprecated/tgw/cross-region-spoke/remote-state.tf @@ -1,6 +1,6 @@ module "account_map" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.4.1" component = "account-map" stage = "root" @@ -11,7 +11,7 @@ module "account_map" { module "vpcs_this_region" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.4.1" for_each = toset(concat(tolist(var.this_region.connections), [var.tenant == null ? module.this.stage : format("%s-%s", module.this.tenant, module.this.stage)])) @@ -25,7 +25,7 @@ module "vpcs_this_region" { module "vpcs_home_region" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.4.1" for_each = toset(concat(tolist(var.home_region.connections), [var.tenant == null ? 
module.this.stage : format("%s-%s", module.this.tenant, module.this.stage)])) @@ -39,7 +39,7 @@ module "vpcs_home_region" { module "tgw_this_region" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.4.1" component = "tgw/hub" stage = var.this_region["tgw_stage_name"] @@ -49,7 +49,7 @@ module "tgw_this_region" { module "tgw_cross_region_connector" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.4.1" component = "tgw/cross-region-hub-connector" stage = var.this_region["tgw_stage_name"] @@ -59,7 +59,7 @@ module "tgw_cross_region_connector" { module "tgw_home_region" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.4.1" component = "tgw/hub" stage = var.home_region["tgw_stage_name"] diff --git a/modules/tgw/cross-region-spoke/routes-home-region.tf b/deprecated/tgw/cross-region-spoke/routes-home-region.tf similarity index 100% rename from modules/tgw/cross-region-spoke/routes-home-region.tf rename to deprecated/tgw/cross-region-spoke/routes-home-region.tf diff --git a/modules/tgw/cross-region-spoke/routes-this-region.tf b/deprecated/tgw/cross-region-spoke/routes-this-region.tf similarity index 100% rename from modules/tgw/cross-region-spoke/routes-this-region.tf rename to deprecated/tgw/cross-region-spoke/routes-this-region.tf diff --git a/modules/tgw/cross-region-spoke/variables.tf b/deprecated/tgw/cross-region-spoke/variables.tf similarity index 100% rename from modules/tgw/cross-region-spoke/variables.tf rename to deprecated/tgw/cross-region-spoke/variables.tf diff --git a/modules/tgw/cross-region-spoke/modules/vpc_routes/versions.tf b/deprecated/tgw/cross-region-spoke/versions.tf similarity index 83% rename from modules/tgw/cross-region-spoke/modules/vpc_routes/versions.tf rename to deprecated/tgw/cross-region-spoke/versions.tf index e89eb16ed..f33ede77f 100644 --- a/modules/tgw/cross-region-spoke/modules/vpc_routes/versions.tf +++ b/deprecated/tgw/cross-region-spoke/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } } } diff --git a/deprecated/tgw/hub/README.md b/deprecated/tgw/hub/README.md new file mode 100644 index 000000000..9b88a8c2b --- /dev/null +++ b/deprecated/tgw/hub/README.md @@ -0,0 +1,121 @@ +# Component: `tgw/hub` + +This component is responsible for provisioning an [AWS Transit Gateway](https://aws.amazon.com/transit-gateway) `hub` that acts as a centralized gateway for connecting VPCs from other `spoke` accounts. + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to configure and use this component: + +```yaml +components: + terraform: + tgw/hub: + settings: + spacelift: + workspace_enabled: true + vars: + enabled: true + name: tgw-hub + eks_component_names: + - eks/cluster-blue + accounts_with_vpc: + - core-auto + - core-corp + - core-network + - plat-dev + - plat-staging + - plat-prod + - plat-sandbox + accounts_with_eks: + - plat-dev + - plat-staging + - plat-prod + - plat-sandbox +``` + +To provision the Transit Gateway and all related resources, run the following commands: + +```sh +atmos terraform plan tgw/hub -s <tenant>-<environment>-network +atmos terraform apply tgw/hub -s <tenant>-<environment>-network +``` + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.1 | + +## Providers + +No providers.
+ +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [tgw\_hub](#module\_tgw\_hub) | cloudposse/transit-gateway/aws | 0.9.1 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [account\_map\_environment\_name](#input\_account\_map\_environment\_name) | The name of the environment where `account_map` is provisioned | `string` | `"gbl"` | no | +| [account\_map\_stage\_name](#input\_account\_map\_stage\_name) | The name of the stage where `account_map` is provisioned | `string` | `"root"` | no | +| [account\_map\_tenant\_name](#input\_account\_map\_tenant\_name) | The name of the tenant where `account_map` is provisioned.

If the `tenant` label is not used, leave this as `null`. | `string` | `null` | no | +| [accounts\_with\_eks](#input\_accounts\_with\_eks) | Set of account names that have EKS | `set(string)` | n/a | yes | +| [accounts\_with\_vpc](#input\_accounts\_with\_vpc) | Set of account names that have VPC | `set(string)` | n/a | yes | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [eks\_component\_names](#input\_eks\_component\_names) | The names of the eks components | `set(string)` |
[
"eks/cluster"
]
| no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [expose\_eks\_sg](#input\_expose\_eks\_sg) | Set true to allow EKS clusters to accept traffic from source accounts | `bool` | `true` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | +| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [eks](#output\_eks) | Accounts with EKS and EKSs information | +| [tgw\_config](#output\_tgw\_config) | Transit Gateway config | +| [transit\_gateway\_arn](#output\_transit\_gateway\_arn) | Transit Gateway ARN | +| [transit\_gateway\_id](#output\_transit\_gateway\_id) | Transit Gateway ID | +| [transit\_gateway\_route\_table\_id](#output\_transit\_gateway\_route\_table\_id) | Transit Gateway route table ID | +| [vpcs](#output\_vpcs) | Accounts with VPC and VPCs information | + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/tgw/hub) - Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/deprecated/tgw/hub/context.tf b/deprecated/tgw/hub/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/deprecated/tgw/hub/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/deprecated/tgw/hub/main.tf b/deprecated/tgw/hub/main.tf new file mode 100644 index 000000000..c18ef7711 --- /dev/null +++ b/deprecated/tgw/hub/main.tf @@ -0,0 +1,35 @@ +# Create the Transit Gateway, route table associations/propagations, and static TGW routes in the `network` account. +# Enable sharing the Transit Gateway with the Organization using Resource Access Manager (RAM). +# If you would like to share resources with your organization or organizational units, +# then you must use the AWS RAM console or CLI command to enable sharing with AWS Organizations. +# When you share resources within your organization, +# AWS RAM does not send invitations to principals. Principals in your organization get access to shared resources without exchanging invitations. 
+# https://docs.aws.amazon.com/ram/latest/userguide/getting-started-sharing.html + +module "tgw_hub" { + source = "cloudposse/transit-gateway/aws" + version = "0.9.1" + + ram_resource_share_enabled = true + route_keys_enabled = true + + create_transit_gateway = true + create_transit_gateway_route_table = true + create_transit_gateway_vpc_attachment = false + create_transit_gateway_route_table_association_and_propagation = false + + config = {} + + context = module.this.context +} + +locals { + tgw_config = { + existing_transit_gateway_id = module.tgw_hub.transit_gateway_id + existing_transit_gateway_route_table_id = module.tgw_hub.transit_gateway_route_table_id + vpcs = module.vpc + eks = module.eks + expose_eks_sg = var.expose_eks_sg + eks_component_names = var.eks_component_names + } +} diff --git a/deprecated/tgw/hub/outputs.tf b/deprecated/tgw/hub/outputs.tf new file mode 100644 index 000000000..a34a0815d --- /dev/null +++ b/deprecated/tgw/hub/outputs.tf @@ -0,0 +1,29 @@ +output "transit_gateway_arn" { + value = module.tgw_hub.transit_gateway_arn + description = "Transit Gateway ARN" +} + +output "transit_gateway_id" { + value = module.tgw_hub.transit_gateway_id + description = "Transit Gateway ID" +} + +output "transit_gateway_route_table_id" { + value = module.tgw_hub.transit_gateway_route_table_id + description = "Transit Gateway route table ID" +} + +output "vpcs" { + value = module.vpc + description = "Accounts with VPC and VPCs information" +} + +output "eks" { + value = module.eks + description = "Accounts with EKS and EKSs information" +} + +output "tgw_config" { + value = local.tgw_config + description = "Transit Gateway config" +} diff --git a/modules/eks/efs-controller/providers.tf b/deprecated/tgw/hub/providers.tf similarity index 100% rename from modules/eks/efs-controller/providers.tf rename to deprecated/tgw/hub/providers.tf diff --git a/deprecated/tgw/hub/remote-state.tf b/deprecated/tgw/hub/remote-state.tf new file mode 100644 index 000000000..775fd7825 --- /dev/null +++ b/deprecated/tgw/hub/remote-state.tf @@ -0,0 +1,62 @@ +locals { + accounts_with_eks = { + for account in var.accounts_with_eks : + account => module.account_map.outputs.account_info_map[account] + } + + accounts_with_vpc = { + for account in var.accounts_with_vpc : + account => module.account_map.outputs.account_info_map[account] + } + + # Create a map of accounts (- or ) and components + eks_remote_states = { + for account_component in setproduct(keys(local.accounts_with_eks), var.eks_component_names) : + join("-", account_component) => { + account = account_component[0] + component = account_component[1] + } + } +} + +module "account_map" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.4.1" + + component = "account-map" + environment = var.account_map_environment_name + stage = var.account_map_stage_name + tenant = coalesce(var.account_map_tenant_name, module.this.tenant) + + context = module.this.context +} + +module "vpc" { + for_each = local.accounts_with_vpc + + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.4.1" + + component = "vpc" + stage = each.value.stage + tenant = lookup(each.value, "tenant", null) + + context = module.this.context +} + +module "eks" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.4.1" + + for_each = local.eks_remote_states + + component = each.value.component + stage = try(split("-", each.value.account)[1], each.value.account) + tenant = try(split("-", each.value.account)[0], 
null) + + defaults = { + eks_cluster_managed_security_group_id = null + } + + context = module.this.context +} diff --git a/deprecated/tgw/hub/variables.tf b/deprecated/tgw/hub/variables.tf new file mode 100644 index 000000000..889ad0d16 --- /dev/null +++ b/deprecated/tgw/hub/variables.tf @@ -0,0 +1,48 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "accounts_with_vpc" { + type = set(string) + description = "Set of account names that have VPC" +} + +variable "accounts_with_eks" { + type = set(string) + description = "Set of account names that have EKS" +} + +variable "expose_eks_sg" { + type = bool + description = "Set true to allow EKS clusters to accept traffic from source accounts" + default = true +} + +variable "eks_component_names" { + type = set(string) + description = "The names of the eks components" + default = ["eks/cluster"] +} + +variable "account_map_environment_name" { + type = string + description = "The name of the environment where `account_map` is provisioned" + default = "gbl" +} + +variable "account_map_stage_name" { + type = string + description = "The name of the stage where `account_map` is provisioned" + default = "root" +} + +variable "account_map_tenant_name" { + type = string + description = <<-EOT + The name of the tenant where `account_map` is provisioned. + + If the `tenant` label is not used, leave this as `null`. + EOT + default = null +} diff --git a/deprecated/tgw/hub/versions.tf b/deprecated/tgw/hub/versions.tf new file mode 100644 index 000000000..f0e7120a6 --- /dev/null +++ b/deprecated/tgw/hub/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.1" + } + } +} diff --git a/deprecated/tgw/spoke/README.md b/deprecated/tgw/spoke/README.md new file mode 100644 index 000000000..605510858 --- /dev/null +++ b/deprecated/tgw/spoke/README.md @@ -0,0 +1,136 @@ +# Component: `tgw/spoke` + +This component is responsible for provisioning [AWS Transit Gateway](https://aws.amazon.com/transit-gateway) attachments to connect VPCs in a `spoke` account to different accounts through a central `hub`. + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to configure and use this component: + +stacks/catalog/tgw/spoke.yaml + +```yaml +components: + terraform: + tgw/spoke-defaults: + metadata: + type: abstract + component: tgw/spoke + settings: + spacelift: + workspace_enabled: true + vars: + enabled: true + name: tgw-spoke + tags: + Team: sre + Service: tgw-spoke + + tgw/spoke: + metadata: + inherits: + - tgw/spoke-defaults + vars: + connections: + - core-network + - core-auto +``` + +stacks/ue2/dev.yaml + +```yaml +import: + - catalog/tgw/spoke + +components: + terraform: + tgw/spoke: + vars: + # use when there is not an EKS cluster in the stack + expose_eks_sg: false + # override default connections + connections: + - core-network + - core-auto + - plat-staging + +``` + +To provision the attachments for a spoke account: + +```sh +atmos terraform plan tgw/spoke -s <tenant>-<environment>-<stage> +atmos terraform apply tgw/spoke -s <tenant>-<environment>-<stage> +``` + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.1 | + +## Providers + +No providers.
+ +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [tgw\_hub](#module\_tgw\_hub) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | +| [tgw\_hub\_role](#module\_tgw\_hub\_role) | ../../account-map/modules/iam-roles | n/a | +| [tgw\_hub\_routes](#module\_tgw\_hub\_routes) | cloudposse/transit-gateway/aws | 0.9.1 | +| [tgw\_spoke\_vpc\_attachment](#module\_tgw\_spoke\_vpc\_attachment) | ./modules/standard_vpc_attachment | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [connections](#input\_connections) | List of accounts to connect to | `list(string)` | n/a | yes | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [eks\_component\_names](#input\_eks\_component\_names) | The names of the eks components | `set(string)` |
[
"eks/cluster"
]
| no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [expose\_eks\_sg](#input\_expose\_eks\_sg) | Set true to allow EKS clusters to accept traffic from source accounts | `bool` | `true` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | +| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [tgw\_hub\_component\_name](#input\_tgw\_hub\_component\_name) | The name of the transit-gateway component | `string` | `"tgw/hub"` | no | +| [tgw\_hub\_environment\_name](#input\_tgw\_hub\_environment\_name) | The name of the environment where `tgw/hub` is provisioned | `string` | `"ue2"` | no | +| [tgw\_hub\_stage\_name](#input\_tgw\_hub\_stage\_name) | The name of the stage where `tgw/hub` is provisioned | `string` | `"network"` | no | +| [tgw\_hub\_tenant\_name](#input\_tgw\_hub\_tenant\_name) | The name of the tenant where `tgw/hub` is provisioned.

If the `tenant` label is not used, leave this as `null`. | `string` | `null` | no | + +## Outputs + +No outputs. + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/tgw) - Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/deprecated/tgw/spoke/context.tf b/deprecated/tgw/spoke/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/deprecated/tgw/spoke/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? 
true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. 
+ Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/deprecated/tgw/spoke/main.tf b/deprecated/tgw/spoke/main.tf new file mode 100644 index 000000000..ff1175909 --- /dev/null +++ b/deprecated/tgw/spoke/main.tf @@ -0,0 +1,49 @@ +# Create the Transit Gateway, route table associations/propagations, and static TGW routes in the `network` account. +# Enable sharing the Transit Gateway with the Organization using Resource Access Manager (RAM). 
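+# (A concrete example of the CLI step described just below, assuming the AWS CLI is configured for the
+# Organization management account: organization-wide RAM sharing is typically enabled once with
+# `aws ram enable-sharing-with-aws-organization`.)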
+# If you would like to share resources with your organization or organizational units, +# then you must use the AWS RAM console or CLI command to enable sharing with AWS Organizations. +# When you share resources within your organization, +# AWS RAM does not send invitations to principals. Principals in your organization get access to shared resources without exchanging invitations. +# https://docs.aws.amazon.com/ram/latest/userguide/getting-started-sharing.html + +locals { + spoke_account = module.this.tenant != null ? format("%s-%s", module.this.tenant, module.this.stage) : module.this.stage +} + +module "tgw_hub_routes" { + source = "cloudposse/transit-gateway/aws" + version = "0.9.1" + + providers = { + aws = aws.tgw-hub + } + + ram_resource_share_enabled = false + route_keys_enabled = false + + create_transit_gateway = false + create_transit_gateway_route_table = false + create_transit_gateway_vpc_attachment = false + create_transit_gateway_route_table_association_and_propagation = true + + config = { + (local.spoke_account) = module.tgw_spoke_vpc_attachment.tg_config, + } + + existing_transit_gateway_route_table_id = module.tgw_hub.outputs.transit_gateway_route_table_id + + context = module.this.context +} + +module "tgw_spoke_vpc_attachment" { + source = "./modules/standard_vpc_attachment" + + owning_account = local.spoke_account + + tgw_config = module.tgw_hub.outputs.tgw_config + connections = var.connections + expose_eks_sg = var.expose_eks_sg + eks_component_names = var.eks_component_names + + context = module.this.context +} diff --git a/deprecated/tgw/spoke/modules/standard_vpc_attachment/context.tf b/deprecated/tgw/spoke/modules/standard_vpc_attachment/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/deprecated/tgw/spoke/modules/standard_vpc_attachment/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/deprecated/tgw/spoke/modules/standard_vpc_attachment/main.tf b/deprecated/tgw/spoke/modules/standard_vpc_attachment/main.tf new file mode 100644 index 000000000..ac5df9e8c --- /dev/null +++ b/deprecated/tgw/spoke/modules/standard_vpc_attachment/main.tf @@ -0,0 +1,66 @@ +locals { + vpcs = var.tgw_config.vpcs + own_vpc = local.vpcs[var.owning_account].outputs + connected_accounts = var.connections + + # Create a list of all of the EKS security groups + own_eks_sgs = compact([ + for account_component in setproduct([var.owning_account], var.eks_component_names) : + try(var.tgw_config.eks[join("-", account_component)].outputs.eks_cluster_managed_security_group_id, "") + ]) + + # Create a map of accounts (- or ) and the security group to add ingress rules for + connected_accounts_allow_ingress = { + for account_sg in setproduct(local.connected_accounts, local.own_eks_sgs) : + account_sg[0] => { + account = account_sg[0] + sg = account_sg[1] + } + } + + allowed_cidrs = [ + for k, v in local.vpcs : v.outputs.vpc_cidr + if contains(local.connected_accounts, k) && k != var.owning_account + ] +} + +module "standard_vpc_attachment" { + source = "cloudposse/transit-gateway/aws" + version = "0.9.1" + + existing_transit_gateway_id = var.tgw_config.existing_transit_gateway_id + existing_transit_gateway_route_table_id = var.tgw_config.existing_transit_gateway_route_table_id + + route_keys_enabled = true + create_transit_gateway = false + create_transit_gateway_route_table = false + create_transit_gateway_vpc_attachment = true + create_transit_gateway_route_table_association_and_propagation = false + + config = { + (var.owning_account) = { + vpc_id = local.own_vpc.vpc_id + vpc_cidr = local.own_vpc.vpc_cidr + subnet_ids = local.own_vpc.private_subnet_ids + subnet_route_table_ids = local.own_vpc.private_route_table_ids + route_to = null + static_routes = null + transit_gateway_vpc_attachment_id = null + route_to_cidr_blocks = local.allowed_cidrs + } + } + + context = module.this.context +} + +resource "aws_security_group_rule" 
"ingress_cidr_blocks" { + for_each = var.expose_eks_sg ? local.connected_accounts_allow_ingress : {} + + description = "Allow inbound traffic from ${each.key}" + type = "ingress" + from_port = 0 + to_port = 65535 + protocol = "tcp" + cidr_blocks = [local.vpcs[each.value.account].outputs.vpc_cidr] + security_group_id = each.value.sg +} diff --git a/deprecated/tgw/spoke/modules/standard_vpc_attachment/outputs.tf b/deprecated/tgw/spoke/modules/standard_vpc_attachment/outputs.tf new file mode 100644 index 000000000..538a37725 --- /dev/null +++ b/deprecated/tgw/spoke/modules/standard_vpc_attachment/outputs.tf @@ -0,0 +1,14 @@ +output "tg_config" { + ## Fit tg config type https://github.com/cloudposse/terraform-aws-transit-gateway#input_config + value = { + vpc_id = null + vpc_cidr = null + subnet_ids = null + subnet_route_table_ids = null + route_to = null + route_to_cidr_blocks = null + static_routes = null + transit_gateway_vpc_attachment_id = module.standard_vpc_attachment.transit_gateway_vpc_attachment_ids[var.owning_account] + } + description = "Transit Gateway configuration formatted for handling" +} diff --git a/deprecated/tgw/spoke/modules/standard_vpc_attachment/variables.tf b/deprecated/tgw/spoke/modules/standard_vpc_attachment/variables.tf new file mode 100644 index 000000000..d8c88fb3b --- /dev/null +++ b/deprecated/tgw/spoke/modules/standard_vpc_attachment/variables.tf @@ -0,0 +1,32 @@ +variable "owning_account" { + type = string + default = null + description = "The name of the account that owns the VPC being attached" +} + +variable "tgw_config" { + type = object({ + existing_transit_gateway_id = string + existing_transit_gateway_route_table_id = string + vpcs = any + eks = any + }) + description = "Object to pass common data from root module to this submodule. See root module for details" +} + +variable "connections" { + type = list(string) + description = "List of accounts to connect to" +} + +variable "expose_eks_sg" { + type = bool + description = "Set true to allow EKS clusters to accept traffic from source accounts" + default = true +} + +variable "eks_component_names" { + type = set(string) + description = "The names of the eks components" + default = ["eks/cluster"] +} diff --git a/deprecated/tgw/spoke/modules/standard_vpc_attachment/versions.tf b/deprecated/tgw/spoke/modules/standard_vpc_attachment/versions.tf new file mode 100644 index 000000000..f33ede77f --- /dev/null +++ b/deprecated/tgw/spoke/modules/standard_vpc_attachment/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + } +} diff --git a/deprecated/tgw/spoke/outputs.tf b/deprecated/tgw/spoke/outputs.tf new file mode 100644 index 000000000..e69de29bb diff --git a/deprecated/tgw/spoke/providers.tf b/deprecated/tgw/spoke/providers.tf new file mode 100644 index 000000000..bfa49d241 --- /dev/null +++ b/deprecated/tgw/spoke/providers.tf @@ -0,0 +1,70 @@ +provider "aws" { + region = var.region + + profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + + dynamic "assume_role" { + for_each = module.iam_roles.profiles_enabled ? 
[] : ["role"] + content { + role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} + +variable "import_profile_name" { + type = string + default = null + description = "AWS Profile name to use when importing a resource" +} + +variable "import_role_arn" { + type = string + default = null + description = "IAM Role ARN to use when importing a resource" +} + +provider "aws" { + alias = "tgw-hub" + region = var.region + + assume_role { + role_arn = coalesce(var.import_role_arn, module.tgw_hub_role.terraform_role_arn) + } +} + +variable "tgw_hub_environment_name" { + type = string + description = "The name of the environment where `tgw/gateway` is provisioned" + default = "ue2" +} + +variable "tgw_hub_stage_name" { + type = string + description = "The name of the stage where `tgw/gateway` is provisioned" + default = "network" +} + +variable "tgw_hub_tenant_name" { + type = string + description = <<-EOT + The name of the tenant where `tgw/hub` is provisioned. + + If the `tenant` label is not used, leave this as `null`. + EOT + default = null +} + +module "tgw_hub_role" { + source = "../../account-map/modules/iam-roles" + + stage = var.tgw_hub_stage_name + environment = var.tgw_hub_environment_name + tenant = var.tgw_hub_tenant_name + + context = module.this.context +} diff --git a/deprecated/tgw/spoke/remote-state.tf b/deprecated/tgw/spoke/remote-state.tf new file mode 100644 index 000000000..d074fca2d --- /dev/null +++ b/deprecated/tgw/spoke/remote-state.tf @@ -0,0 +1,11 @@ +module "tgw_hub" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.4.1" + + component = var.tgw_hub_component_name + stage = var.tgw_hub_stage_name + environment = var.tgw_hub_environment_name + tenant = var.tgw_hub_tenant_name + + context = module.this.context +} diff --git a/deprecated/tgw/spoke/variables.tf b/deprecated/tgw/spoke/variables.tf new file mode 100644 index 000000000..a34bd9890 --- /dev/null +++ b/deprecated/tgw/spoke/variables.tf @@ -0,0 +1,27 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "connections" { + type = list(string) + description = "List of accounts to connect to" +} + +variable "tgw_hub_component_name" { + type = string + description = "The name of the transit-gateway component" + default = "tgw/hub" +} + +variable "expose_eks_sg" { + type = bool + description = "Set true to allow EKS clusters to accept traffic from source accounts" + default = true +} + +variable "eks_component_names" { + type = set(string) + description = "The names of the eks components" + default = ["eks/cluster"] +} diff --git a/deprecated/tgw/spoke/versions.tf b/deprecated/tgw/spoke/versions.tf new file mode 100644 index 000000000..f0e7120a6 --- /dev/null +++ b/deprecated/tgw/spoke/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.1" + } + } +} diff --git a/docs/targets.md b/docs/targets.md index e44b8acf8..4a98e523f 100644 --- a/docs/targets.md +++ b/docs/targets.md @@ -1,5 +1,7 @@ + ## Makefile Targets + ```text Available targets: @@ -11,4 +13,5 @@ Available targets: upstream-component Upstream a given component ``` + diff --git a/docs/terraform.md b/docs/terraform.md deleted file mode 100644 index 128ef8b46..000000000 --- a/docs/terraform.md +++ /dev/null @@ -1,25 +0,0 @@ - -## Requirements - -No requirements. 
- -## Providers - -No providers. - -## Modules - -No modules. - -## Resources - -No resources. - -## Inputs - -No inputs. - -## Outputs - -No outputs. - diff --git a/mixins/README.md b/mixins/README.md index b09a0f5a5..8818bf35b 100644 --- a/mixins/README.md +++ b/mixins/README.md @@ -1,12 +1,14 @@ # Terraform Mixins -A Terraform mixin (inspired by the [concept of the same name in OOP languages such as Python and Ruby](https://en.wikipedia.org/wiki/Mixin)) -is a Terraform configuration file that can be dropped into a root-level module, i.e. a component, in order to add additional +A Terraform mixin (inspired by the +[concept of the same name in OOP languages such as Python and Ruby](https://en.wikipedia.org/wiki/Mixin)) is a Terraform +configuration file that can be dropped into a root-level module, i.e. a component, in order to add additional functionality. Mixins are meant to encourage code reuse, leading to more simple components with less code repetition between component to component. + ## Mixin: `infra-state.mixin.tf` @@ -34,6 +36,11 @@ configuration, specifying which component the resources belong to. It's important to note that all modules and resources within the component then need to use `module.introspection.context` and `module.introspection.tags`, respectively, rather than `module.this.context` and `module.this.tags`. +## Mixin: `provider-awsutils.mixin.tf` + +This mixin is meant to be added to a terraform module that wants to use the awsutils provider. +It assumes the standard `providers.tf` file is present in the module. + ## Mixin: `sops.mixin.tf` This mixin is meant to be added to Terraform EKS components which are used in a cluster where sops-secrets-operator (see: https://github.com/isindir/sops-secrets-operator) @@ -47,3 +54,4 @@ etc. That is, that it has the following characteristics: 2. Does not already instantiate a Kubernetes provider (only the Helm provider is necessary, typically, for EKS components). + diff --git a/mixins/github-actions-iam-policy/ecr/github-actions-iam-policy.tf b/mixins/github-actions-iam-policy/ecr/github-actions-iam-policy.tf index ce8efa072..20299a468 100644 --- a/mixins/github-actions-iam-policy/ecr/github-actions-iam-policy.tf +++ b/mixins/github-actions-iam-policy/ecr/github-actions-iam-policy.tf @@ -1,37 +1,54 @@ locals { - github_actions_iam_policy = join("", data.aws_iam_policy_document.github_actions_iam_policy.*.json) + enabled = module.this.enabled + github_actions_iam_policy = data.aws_iam_policy_document.github_actions_iam_policy.json + ecr_resources_static = [for k, v in module.ecr.repository_arn_map : v] + ecr_resources_wildcard = [for k, v in module.ecr.repository_arn_map : "${v}/*"] + resources = concat(local.ecr_resources_static, local.ecr_resources_wildcard) } data "aws_iam_policy_document" "github_actions_iam_policy" { - count = var.github_actions_iam_role_enabled ? 1 : 0 - - # Permissions copied from https://docs.aws.amazon.com/AmazonECR/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-AmazonEC2ContainerRegistryPowerUser - # This policy grants administrative permissions that allow IAM users to read and write to repositories, - # but doesn't allow them to delete repositories or change the policy documents that are applied to them. 
statement { - sid = "AmazonEC2ContainerRegistryPowerUser" + sid = "AllowECRPermissions" effect = "Allow" actions = [ - "ecr:GetAuthorizationToken", "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:DescribeImages", + "ecr:BatchDeleteImage", "ecr:BatchGetImage", + "ecr:CompleteLayerUpload", + "ecr:DeleteLifecyclePolicy", + "ecr:DescribeImages", + "ecr:DescribeImageScanFindings", + "ecr:DescribeRepositories", + "ecr:GetAuthorizationToken", + "ecr:GetDownloadUrlForLayer", "ecr:GetLifecyclePolicy", "ecr:GetLifecyclePolicyPreview", - "ecr:ListTagsForResource", - "ecr:DescribeImageScanFindings", + "ecr:GetRepositoryPolicy", "ecr:InitiateLayerUpload", - "ecr:UploadLayerPart", - "ecr:CompleteLayerUpload", + "ecr:ListImages", "ecr:PutImage", + "ecr:PutImageScanningConfiguration", + "ecr:PutImageTagMutability", + "ecr:PutLifecyclePolicy", + "ecr:StartImageScan", + "ecr:StartLifecyclePolicyPreview", + "ecr:TagResource", + "ecr:UntagResource", + "ecr:UploadLayerPart", ] + resources = local.resources + } - #bridgecrew:skip=BC_AWS_IAM_57:OK to allow write access to all ECRs because ECRs have their own access policies - # and this policy prohibits the user from making changes to the access policy. + # required as minimum permissions for pushing and logging into a public ECR repository + # https://github.com/aws-actions/amazon-ecr-login#permissions + # https://docs.aws.amazon.com/AmazonECR/latest/public/docker-push-ecr-image.html + statement { + sid = "AllowEcrGetAuthorizationToken" + effect = "Allow" + actions = [ + "ecr:GetAuthorizationToken", + "sts:GetServiceBearerToken" + ] resources = ["*"] } } diff --git a/mixins/github-actions-iam-policy/github-actions-iam-policy.tf b/mixins/github-actions-iam-policy/github-actions-iam-policy.tf new file mode 100644 index 000000000..deb44132a --- /dev/null +++ b/mixins/github-actions-iam-policy/github-actions-iam-policy.tf @@ -0,0 +1,34 @@ +## Custom IAM Policy for GitHub Actions +## Requires GitHub OIDC Component be deployed +## Usage: +## in your stack configuration: +# components: +# terraform: +# foo: +# vars: +# github_actions_iam_role_enabled: true +# github_actions_allowed_repos: +# - MyOrg/MyRepo +# github_actions_iam_policy_statements: +# - Sid: "AllowAll" +# Action: [ +# "lambda:*", +# ] +# Effect: "Allow" +# Resource: ["*"] +# + + +variable "github_actions_iam_policy_statements" { + type = list(any) + default = [] +} + +locals { + enabled = module.this.enabled + policy = jsonencode({ + Version = "2012-10-17", + Statement = var.github_actions_iam_policy_statements + }) + github_actions_iam_policy = local.policy +} diff --git a/mixins/github-actions-iam-policy/s3-bucket/github-actions-iam-policy.tf b/mixins/github-actions-iam-policy/s3-bucket/github-actions-iam-policy.tf new file mode 100644 index 000000000..08c50f33a --- /dev/null +++ b/mixins/github-actions-iam-policy/s3-bucket/github-actions-iam-policy.tf @@ -0,0 +1,23 @@ +variable "github_actions_iam_actions" { + type = list(string) + default = [ + "s3:CreateMultipartUpload", + "s3:PutObject", + "s3:PutObjectAcl" + ] + description = "List of actions to permit `GitHub OIDC authenticated users` to perform on bucket and bucket prefixes" +} + + +locals { + github_actions_iam_policy = data.aws_iam_policy_document.github_actions_iam_policy.json +} + +data "aws_iam_policy_document" "github_actions_iam_policy" { + statement { + sid = "AllowS3UploadPermissions" + effect = "Allow" + actions = 
var.github_actions_iam_actions + resources = [module.s3_bucket.bucket_arn, "${module.s3_bucket.bucket_arn}/*"] + } +} diff --git a/mixins/github-actions-iam-role/README-github-action-iam-role.md b/mixins/github-actions-iam-role/README-github-action-iam-role.md index 1c8e0b6bb..c46fbf286 100644 --- a/mixins/github-actions-iam-role/README-github-action-iam-role.md +++ b/mixins/github-actions-iam-role/README-github-action-iam-role.md @@ -1,33 +1,32 @@ # Mixin: `github-actions-iam-role.mixin.tf` -This mixin component is responsible for creating an IAM role that can be assumed by a GitHub action for a specific purpose. -It requires that the `github-oidc-provider` component be installed in the same account, that -`components/terraform/account-map/modules/team-assume-role-policy/github-assume-role-policy.mixin.tf` -is present in the repository, and that the component using this mixin contains a file (by convention named -`github-actions-iam-policy.tf`) which defines a JSON policy document that will be attached to the IAM role, -contained in a local variable named `github_actions_iam_policy`. It is up to the component using this mixin -to define the policy to be associated with the role. The policy should be as restrictive as possible. - -At this time, only one role can be created per component (per account, per region). Generated role names -include all the `null-label` labels, so it is possible to create multiple roles in the same account, -but not multiple roles in the same component in the same region with different policies. -This limitation of the mixin is somewhat intentional, in that each role should be created for a specific -component, and component can create its own specific role. If this limitation turns -out to be truly burdensome, note that `aws-teams` also supports GitHub actions assuming its roles. - +This mixin component is responsible for creating an IAM role that can be assumed by a GitHub action for a specific +purpose. It requires that the `github-oidc-provider` component be installed in the same account, that +`components/terraform/account-map/modules/team-assume-role-policy/github-assume-role-policy.mixin.tf` is present in the +repository, and that the component using this mixin contains a file (by convention named `github-actions-iam-policy.tf`) +which defines a JSON policy document that will be attached to the IAM role, contained in a local variable named +`github_actions_iam_policy`. It is up to the component using this mixin to define the policy to be associated with the +role. The policy should be as restrictive as possible. + +At this time, only one role can be created per component (per account, per region). Generated role names include all the +`null-label` labels, so it is possible to create multiple roles in the same account, but not multiple roles in the same +component in the same region with different policies. This limitation of the mixin is somewhat intentional, in that each +role should be created for a specific component, and component can create its own specific role. If this limitation +turns out to be truly burdensome, note that `aws-teams` also supports GitHub actions assuming its roles. 
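+For orientation, a component that adopts this mixin typically ends up with a layout along these lines (a sketch only;
+the file names besides the mixin and policy files mentioned above are illustrative):
+
+```hcl
+# components/terraform/<component>/
+#   main.tf                            # the component's own resources
+#   github-actions-iam-policy.tf       # defines local.github_actions_iam_policy (see "Define the role policy" below)
+#   github-actions-iam-role.mixin.tf   # a copy of this mixin
+```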
## Usage **Stack Level**: Global or Regional This mixin provisions a specific IAM role that can be assumed by a GitHub action for a specific purpose, analogous to -how [EKS IAM Roles for Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) +how +[EKS IAM Roles for Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) works for EKS. ### Define the role policy -Create a file named `github-actions-iam-policy.tf` that defines the desired policy for the role and saves it -as a JSON string in a local variable named `github_actions_iam_policy`. For example: +Create a file named `github-actions-iam-policy.tf` that defines the desired policy for the role and saves it as a JSON +string in a local variable named `github_actions_iam_policy`. For example: ```hcl locals { @@ -48,11 +47,10 @@ data "aws_iam_policy_document" "github_actions_iam_policy" { ### Create the role alongside the component -Define values for the variables defined in `github-actions-iam-role.mixin.tf` in the stack for the component. -Most importantly, set `github_actions_allowed_repos` to the list of GitHub repositories where installed -GitHub actions will be allowed to assume the role. Wildcards are allowed, so you can allow all repositories -in your organization by setting `github_actions_allowed_repos = ["/*"]`. - +Define values for the variables defined in `github-actions-iam-role.mixin.tf` in the stack for the component. Most +importantly, set `github_actions_allowed_repos` to the list of GitHub repositories where installed GitHub actions will +be allowed to assume the role. Wildcards are allowed, so you can allow all repositories in your organization by setting +`github_actions_allowed_repos = ["/*"]`. ```yaml components: @@ -71,26 +69,25 @@ components: #### Add required workflow permissions -In the GitHub action workflow, add required permissions at the top of the -workflow, or within the job. See the [GitHub documentation](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#adding-permissions-settings) +In the GitHub action workflow, add required permissions at the top of the workflow, or within the job. See the +[GitHub documentation](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#adding-permissions-settings) for more details. 
```yaml permissions: id-token: write # This is required for requesting the JWT - contents: read # This is required for actions/checkout + contents: read # This is required for actions/checkout ``` #### Configure settings via environment variables -Although you can configure the settings in various ways, including using -GitHub Secrets and Environments, for a balance of simplicity and visibility -we recommend configuration by hard-coding settings in the following environment variables +Although you can configure the settings in various ways, including using GitHub Secrets and Environments, for a balance +of simplicity and visibility we recommend configuration by hard-coding settings in the following environment variables at the top the workflow: ```yaml env: - AWS_REGION: us-east-1 # The AWS region where the workflow should run + AWS_REGION: us-east-1 # The AWS region where the workflow should run ECR_REPOSITORY: infrastructure # The ECR repository where the workflow should push the image ECR_REGISTRY: 123456789012.dkr.ecr.us-east-1.amazonaws.com # The ECR registry where the workflow should push the image GHA_IAM_ROLE: arn:aws:iam::123456789012:role/eg-mgmt-use1-art-gha # The ARN of the IAM role to assume @@ -99,10 +96,10 @@ env: Then add the following step to assume the role: ```yaml - - name: Configure AWS credentials for ECR - uses: aws-actions/configure-aws-credentials@v1 - with: - role-to-assume: ${{ env.GHA_IAM_ROLE }} - role-session-name: infra-gha-docker-build-and-push # This can be any name. It shows up in audit logs. - aws-region: ${{ env.AWS_REGION }} +- name: Configure AWS credentials for ECR + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ env.GHA_IAM_ROLE }} + role-session-name: infra-gha-docker-build-and-push # This can be any name. It shows up in audit logs. + aws-region: ${{ env.AWS_REGION }} ``` diff --git a/mixins/github-actions-iam-role/github-actions-iam-role.mixin.tf b/mixins/github-actions-iam-role/github-actions-iam-role.mixin.tf index 4294f1aac..732d9d68b 100644 --- a/mixins/github-actions-iam-role/github-actions-iam-role.mixin.tf +++ b/mixins/github-actions-iam-role/github-actions-iam-role.mixin.tf @@ -1,3 +1,9 @@ +# This mixin creates an IAM role that a GitHub Action Runner can assume, +# with appropriate controls. Usually this file is included in the component +# that needs to allow the GitHub Action (GHA) to operate with it. For example, +# the `ecr` component includes this to create a role that will +# allow the GHA to push images to the ECR it creates. + # This mixin requires that a local variable named `github_actions_iam_policy` be defined # and its value to be a JSON IAM Policy Document defining the permissions for the role. 
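+# For example, a minimal (hypothetical) policy local defined by the consuming component could look like:
+#
+#   locals {
+#     github_actions_iam_policy = jsonencode({
+#       Version   = "2012-10-17"
+#       Statement = [{
+#         Sid      = "AllowExampleReadOnly"
+#         Effect   = "Allow"
+#         Action   = ["s3:GetObject"]
+#         Resource = ["*"]
+#       }]
+#     })
+#   }
+#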
# It also requires that the `github-oidc-provider` has been previously installed and the @@ -27,6 +33,11 @@ variable "github_actions_iam_role_attributes" { default = [] } +variable "privileged" { + type = bool + description = "True if the default provider already has access to the backend" + default = false +} locals { github_actions_iam_role_enabled = module.this.enabled && var.github_actions_iam_role_enabled && length(var.github_actions_allowed_repos) > 0 @@ -46,6 +57,7 @@ module "gha_assume_role" { source = "../account-map/modules/team-assume-role-policy" trusted_github_repos = var.github_actions_allowed_repos + privileged = var.privileged context = module.gha_role_name.context } diff --git a/mixins/provider-awsutils.mixin.tf b/mixins/provider-awsutils.mixin.tf new file mode 100644 index 000000000..9df2a64a0 --- /dev/null +++ b/mixins/provider-awsutils.mixin.tf @@ -0,0 +1,21 @@ +# <-- BEGIN DOC --> +# +# This mixin is meant to be added to a terraform module that wants to use the awsutils provider. +# It assumes the standard `providers.tf` file is present in the module. +# +# <-- END DOC --> + +provider "awsutils" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = module.iam_roles.terraform_role_arn + } + } +} diff --git a/mixins/provider-datadog.tf b/mixins/provider-datadog.tf new file mode 100644 index 000000000..8db220f1f --- /dev/null +++ b/mixins/provider-datadog.tf @@ -0,0 +1,12 @@ +module "datadog_configuration" { + source = "../datadog-configuration/modules/datadog_keys" + region = var.region + context = module.this.context +} + +provider "datadog" { + api_key = module.datadog_configuration.datadog_api_key + app_key = module.datadog_configuration.datadog_app_key + api_url = module.datadog_configuration.datadog_api_url + validate = local.enabled +} diff --git a/mixins/provider-helm.tf b/mixins/provider-helm.tf index 9bb5edb6f..91cc7f6d4 100644 --- a/mixins/provider-helm.tf +++ b/mixins/provider-helm.tf @@ -21,18 +21,35 @@ variable "kubeconfig_file_enabled" { type = bool default = false description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false } variable "kubeconfig_file" { type = string default = "" description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false } variable "kubeconfig_context" { type = string default = "" - description = "Context to choose from the Kubernetes kube config file" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. 
+ EOT + nullable = false } variable "kube_data_auth_enabled" { @@ -42,6 +59,7 @@ variable "kube_data_auth_enabled" { If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_enabled" { @@ -51,48 +69,62 @@ variable "kube_exec_auth_enabled" { If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_role_arn" { type = string default = "" description = "The role ARN for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_role_arn_enabled" { type = bool default = true description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false } variable "kube_exec_auth_aws_profile" { type = string default = "" description = "The AWS config profile for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_aws_profile_enabled" { type = bool default = false description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false } variable "kubeconfig_exec_auth_api_version" { type = string default = "client.authentication.k8s.io/v1beta1" description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false } variable "helm_manifest_experiment_enabled" { type = bool - default = true + default = false description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false } locals { kubeconfig_file_enabled = var.kubeconfig_file_enabled - kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled - kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled # Eventually we might try to get this from an environment variable kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version @@ -101,16 +133,17 @@ locals { "--profile", var.kube_exec_auth_aws_profile ] : [] - kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, var.import_role_arn, module.iam_roles.terraform_role_arn) + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ "--role-arn", local.kube_exec_auth_role_arn ] : [] # Provide dummy configuration for the case where the EKS cluster is not available. - certificate_authority_data = try(module.eks.outputs.eks_cluster_certificate_authority_data, "") + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? 
null : try(base64decode(local.certificate_authority_data), null) # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") - eks_cluster_endpoint = try(module.eks.outputs.eks_cluster_endpoint, "") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") } data "aws_eks_cluster_auth" "eks" { @@ -121,15 +154,16 @@ data "aws_eks_cluster_auth" "eks" { provider "helm" { kubernetes { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) + cluster_ca_certificate = local.cluster_ca_certificate token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" @@ -146,15 +180,16 @@ provider "helm" { provider "kubernetes" { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) + cluster_ca_certificate = local.cluster_ca_certificate token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? 
["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" diff --git a/mixins/providers-aws-superadmin.tf b/mixins/providers-aws-superadmin.tf new file mode 100644 index 000000000..dc58d9a25 --- /dev/null +++ b/mixins/providers-aws-superadmin.tf @@ -0,0 +1,3 @@ +provider "aws" { + region = var.region +} diff --git a/mixins/providers.depth-1.tf b/mixins/providers.depth-1.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/mixins/providers.depth-1.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/mixins/providers.depth-2.tf b/mixins/providers.depth-2.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/mixins/providers.depth-2.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/access-analyzer/README.md b/modules/access-analyzer/README.md new file mode 100644 index 000000000..896e06095 --- /dev/null +++ b/modules/access-analyzer/README.md @@ -0,0 +1,193 @@ +# Component: `access-analyzer` + +This component is responsible for configuring AWS Identity and Access Management Access Analyzer within an AWS +Organization. + +IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM +roles, shared with an external entity. This lets you identify unintended access to your resources and data, which is a +security risk. IAM Access Analyzer identifies resources shared with external principals by using logic-based reasoning +to analyze the resource-based policies in your AWS environment. For each instance of a resource shared outside of your +account, IAM Access Analyzer generates a finding. Findings include information about the access and the external +principal granted to it. You can review findings to determine if the access is intended and safe or if the access is +unintended and a security risk. In addition to helping you identify resources shared with an external entity, you can +use IAM Access Analyzer findings to preview how your policy affects public and cross-account access to your resource +before deploying resource permissions. The findings are organized in a visual summary dashboard. The dashboard +highlights the split between public and cross-account access findings, and provides a breakdown of findings by resource +type. + +IAM Access Analyzer analyzes only policies applied to resources in the same AWS Region where it's enabled. 
To monitor +all resources in your AWS environment, you must create an analyzer to enable IAM Access Analyzer in each Region where +you're using supported AWS resources. + +AWS Identity and Access Management Access Analyzer provides the following capabilities: + +- IAM Access Analyzer external access analyzers help identify resources in your organization and accounts that are + shared with an external entity. + +- IAM Access Analyzer unused access analyzers help identify unused access in your organization and accounts. + +- IAM Access Analyzer validates IAM policies against policy grammar and AWS best practices. + +- IAM Access Analyzer custom policy checks help validate IAM policies against your specified security standards. + +- IAM Access Analyzer generates IAM policies based on access activity in your AWS CloudTrail logs. + +Here's a typical workflow: + +**Delegate Access Analyzer to another account**: From the Organization management (root) account, delegate +administration to a specific AWS account within your organization (usually the security account). + +**Create Access Analyzers in the Delegated Administrator Account**: Enable the Access Analyzers for external access and +unused access in the delegated administrator account. + +## Deployment Overview + +```yaml +components: + terraform: + access-analyzer/defaults: + metadata: + component: access-analyzer + type: abstract + vars: + enabled: true + global_environment: gbl + account_map_tenant: core + root_account_stage: root + delegated_administrator_account_name: core-mgt + accessanalyzer_service_principal: "access-analyzer.amazonaws.com" + accessanalyzer_organization_enabled: false + accessanalyzer_organization_unused_access_enabled: false + organizations_delegated_administrator_enabled: false +``` + +```yaml +import: + - catalog/access-analyzer/defaults + +components: + terraform: + access-analyzer/root: + metadata: + component: access-analyzer + inherits: + - access-analyzer/defaults + vars: + organizations_delegated_administrator_enabled: true +``` + +```yaml +import: + - catalog/access-analyzer/defaults + +components: + terraform: + access-analyzer/delegated-administrator: + metadata: + component: access-analyzer + inherits: + - access-analyzer/defaults + vars: + accessanalyzer_organization_enabled: true + accessanalyzer_organization_unused_access_enabled: true + unused_access_age: 30 +``` + +### Provisioning + +Delegate Access Analyzer to the security account: + +```bash +atmos terraform apply access-analyzer/root -s plat-dev-gbl-root +``` + +Provision Access Analyzers for external access and unused access in the delegated administrator (security) account in +each region: + +```bash +atmos terraform apply access-analyzer/delegated-administrator -s plat-dev-use1-mgt +``` + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.9.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_accessanalyzer_analyzer.organization](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/accessanalyzer_analyzer) 
| resource | +| [aws_accessanalyzer_analyzer.organization_unused_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/accessanalyzer_analyzer) | resource | +| [aws_organizations_delegated_administrator.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/organizations_delegated_administrator) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [accessanalyzer\_organization\_enabled](#input\_accessanalyzer\_organization\_enabled) | Flag to enable the Organization Access Analyzer | `bool` | n/a | yes | +| [accessanalyzer\_organization\_unused\_access\_enabled](#input\_accessanalyzer\_organization\_unused\_access\_enabled) | Flag to enable the Organization unused access Access Analyzer | `bool` | n/a | yes | +| [accessanalyzer\_service\_principal](#input\_accessanalyzer\_service\_principal) | The Access Analyzer service principal for which you want to make the member account a delegated administrator | `string` | `"access-analyzer.amazonaws.com"` | no | +| [account\_map\_tenant](#input\_account\_map\_tenant) | The tenant where the `account_map` component required by remote-state is deployed | `string` | n/a | yes | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delegated\_administrator\_account\_name](#input\_delegated\_administrator\_account\_name) | The name of the account that is the AWS Organization Delegated Administrator account | `string` | n/a | yes | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [global\_environment](#input\_global\_environment) | Global environment name | `string` | `"gbl"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [organization\_management\_account\_name](#input\_organization\_management\_account\_name) | The name of the AWS Organization management account | `string` | `null` | no | +| [organizations\_delegated\_administrator\_enabled](#input\_organizations\_delegated\_administrator\_enabled) | Flag to enable the Organization delegated administrator | `bool` | n/a | yes | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [root\_account\_stage](#input\_root\_account\_stage) | The stage name for the Organization root (management) account. This is used to lookup account IDs from account names
using the `account-map` component. | `string` | `"root"` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [unused\_access\_age](#input\_unused\_access\_age) | The specified access age in days for which to generate findings for unused access | `number` | `30` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [aws\_organizations\_delegated\_administrator\_id](#output\_aws\_organizations\_delegated\_administrator\_id) | AWS Organizations Delegated Administrator ID | +| [aws\_organizations\_delegated\_administrator\_status](#output\_aws\_organizations\_delegated\_administrator\_status) | AWS Organizations Delegated Administrator status | +| [organization\_accessanalyzer\_id](#output\_organization\_accessanalyzer\_id) | Organization Access Analyzer ID | +| [organization\_unused\_access\_accessanalyzer\_id](#output\_organization\_unused\_access\_accessanalyzer\_id) | Organization unused access Access Analyzer ID | + + +## References + +- https://aws.amazon.com/iam/access-analyzer/ +- https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html +- https://repost.aws/knowledge-center/iam-access-analyzer-organization +- https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/accessanalyzer_analyzer +- https://github.com/hashicorp/terraform-provider-aws/issues/19312 +- https://github.com/hashicorp/terraform-provider-aws/pull/19389 +- https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/organizations_delegated_administrator diff --git a/modules/access-analyzer/context.tf b/modules/access-analyzer/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/access-analyzer/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
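+# A concrete illustration of that distinction (illustrative values only, assuming all inputs are left at their
+# defaults):
+#   module.this.context.delimiter   # => null  (raw input; default not yet applied)
+#   module.this.delimiter           # => "-"   (final value with the default filled in)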
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/access-analyzer/main.tf b/modules/access-analyzer/main.tf new file mode 100644 index 000000000..db6f89e38 --- /dev/null +++ b/modules/access-analyzer/main.tf @@ -0,0 +1,37 @@ +locals { + enabled = module.this.enabled + account_map = module.account_map.outputs.full_account_map + org_delegated_administrator_account_id = local.account_map[var.delegated_administrator_account_name] +} + +resource "aws_accessanalyzer_analyzer" "organization" { + count = local.enabled && var.accessanalyzer_organization_enabled ? 1 : 0 + + analyzer_name = format("%s-organization", module.this.id) + type = "ORGANIZATION" + + tags = module.this.tags +} + +resource "aws_accessanalyzer_analyzer" "organization_unused_access" { + count = local.enabled && var.accessanalyzer_organization_unused_access_enabled ? 1 : 0 + + analyzer_name = format("%s-organization-unused-access", module.this.id) + type = "ORGANIZATION_UNUSED_ACCESS" + + configuration { + unused_access { + unused_access_age = var.unused_access_age + } + } + + tags = module.this.tags +} + +# Delegate Access Analyzer to the administrator account (usually the security account) +resource "aws_organizations_delegated_administrator" "default" { + count = local.enabled && var.organizations_delegated_administrator_enabled ? 
1 : 0 + + account_id = local.org_delegated_administrator_account_id + service_principal = var.accessanalyzer_service_principal +} diff --git a/modules/access-analyzer/outputs.tf b/modules/access-analyzer/outputs.tf new file mode 100644 index 000000000..a7e70d6c7 --- /dev/null +++ b/modules/access-analyzer/outputs.tf @@ -0,0 +1,19 @@ +output "organization_accessanalyzer_id" { + value = one(aws_accessanalyzer_analyzer.organization[*].id) + description = "Organization Access Analyzer ID" +} + +output "organization_unused_access_accessanalyzer_id" { + value = one(aws_accessanalyzer_analyzer.organization_unused_access[*].id) + description = "Organization unused access Access Analyzer ID" +} + +output "aws_organizations_delegated_administrator_id" { + value = one(aws_organizations_delegated_administrator.default[*].id) + description = "AWS Organizations Delegated Administrator ID" +} + +output "aws_organizations_delegated_administrator_status" { + value = one(aws_organizations_delegated_administrator.default[*].status) + description = "AWS Organizations Delegated Administrator status" +} diff --git a/modules/access-analyzer/providers.tf b/modules/access-analyzer/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/access-analyzer/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/access-analyzer/remote-state.tf b/modules/access-analyzer/remote-state.tf new file mode 100644 index 000000000..ba717f1ab --- /dev/null +++ b/modules/access-analyzer/remote-state.tf @@ -0,0 +1,11 @@ +module "account_map" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "account-map" + tenant = (var.account_map_tenant != "") ? var.account_map_tenant : module.this.tenant + stage = var.root_account_stage + environment = var.global_environment + + context = module.this.context +} diff --git a/modules/access-analyzer/variables.tf b/modules/access-analyzer/variables.tf new file mode 100644 index 000000000..f6244ecd1 --- /dev/null +++ b/modules/access-analyzer/variables.tf @@ -0,0 +1,62 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "account_map_tenant" { + type = string + description = "The tenant where the `account_map` component required by remote-state is deployed" +} + +variable "delegated_administrator_account_name" { + type = string + description = "The name of the account that is the AWS Organization Delegated Administrator account" +} + +variable "global_environment" { + type = string + default = "gbl" + description = "Global environment name" +} + +variable "organization_management_account_name" { + type = string + default = null + description = "The name of the AWS Organization management account" +} + +variable "root_account_stage" { + type = string + default = "root" + description = <<-DOC + The stage name for the Organization root (management) account. This is used to lookup account IDs from account names + using the `account-map` component. 
+ DOC +} + +variable "accessanalyzer_organization_enabled" { + type = bool + description = "Flag to enable the Organization Access Analyzer" +} + +variable "accessanalyzer_organization_unused_access_enabled" { + type = bool + description = "Flag to enable the Organization unused access Access Analyzer" +} + +variable "unused_access_age" { + type = number + description = "The specified access age in days for which to generate findings for unused access" + default = 30 +} + +variable "organizations_delegated_administrator_enabled" { + type = bool + description = "Flag to enable the Organization delegated administrator" +} + +variable "accessanalyzer_service_principal" { + type = string + description = "The Access Analyzer service principal for which you want to make the member account a delegated administrator" + default = "access-analyzer.amazonaws.com" +} diff --git a/modules/access-analyzer/versions.tf b/modules/access-analyzer/versions.tf new file mode 100644 index 000000000..b5920b7b1 --- /dev/null +++ b/modules/access-analyzer/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.3.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.9.0" + } + } +} diff --git a/modules/account-map/README.md b/modules/account-map/README.md index c939de919..f8ce68877 100644 --- a/modules/account-map/README.md +++ b/modules/account-map/README.md @@ -1,14 +1,29 @@ +--- +tags: + - component/account-map + - layer/accounts + - provider/aws + - privileged +--- + # Component: `account-map` -This component is responsible for provisioning information only: it simply populates Terraform state with data (account ids, groups, and roles) that other root modules need via outputs. +This component is responsible for provisioning information only: it simply populates Terraform state with data (account +ids, groups, and roles) that other root modules need via outputs. + +## Pre-requisites + +- [account](https://docs.cloudposse.com/components/library/aws/account) must be provisioned before + [account-map](https://docs.cloudposse.com/components/library/aws/account-map) component ## Usage **Stack Level**: Global -Here is an example snippet for how to use this component. Include this snippet in the stack configuration for the management account -(typically `root`) in the management tenant/OU (usually something like `mgmt` or `core`) in the global region (`gbl`). You can include -the content directly, or create a `stacks/catalog/account-map.yaml` file and import it from there. +Here is an example snippet for how to use this component. Include this snippet in the stack configuration for the +management account (typically `root`) in the management tenant/OU (usually something like `mgmt` or `core`) in the +global region (`gbl`). You can include the content directly, or create a `stacks/catalog/account-map.yaml` file and +import it from there. ```yaml components: @@ -18,7 +33,7 @@ components: enabled: true # Set profiles_enabled to false unless we are using AWS config profiles for Terraform access. # When profiles_enabled is false, role_arn must be provided instead of profile in each terraform component provider. - # This is automatically handled by the component's `provider.tf` file in conjunction with + # This is automatically handled by the component's `provider.tf` file in conjunction with # the `account-map/modules/iam-roles` module. 
profiles_enabled: false root_account_aws_name: "aws-root" @@ -26,43 +41,47 @@ components: identity_account_account_name: identity dns_account_account_name: dns audit_account_account_name: audit - + # The following variables contain `format()` strings that take the labels from `null-label` # as arguments in the standard order. The default values are shown here, assuming - # the `null-label.label_order` is + # the `null-label.label_order` is # ["namespace", "tenant", "environment", "stage", "name", "attributes"] # Note that you can rearrange the order of the labels in the template by # using [explicit argument indexes](https://pkg.go.dev/fmt#hdr-Explicit_argument_indexes) just like in `go`. # `iam_role_arn_template_template` is the template for the template [sic] used to render Role ARNs. - # The template is first used to render a template for the account that takes only the role name. + # The template is first used to render a template for the account that takes only the role name. # Then that rendered template is used to create the final Role ARN for the account. iam_role_arn_template_template: "arn:%s:iam::%s:role/%s-%s-%s-%s-%%s" # `profile_template` is the template used to render AWS Profile names. profile_template: "%s-%s-%s-%s-%s" - ``` + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [terraform](#requirement\_terraform) | >= 1.2.0 | | [aws](#requirement\_aws) | >= 4.9.0 | +| [local](#requirement\_local) | >= 1.3 | +| [utils](#requirement\_utils) | >= 1.10.0 | ## Providers | Name | Version | |------|---------| | [aws](#provider\_aws) | >= 4.9.0 | -| [local](#provider\_local) | n/a | +| [local](#provider\_local) | >= 1.3 | +| [utils](#provider\_utils) | >= 1.10.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [accounts](#module\_accounts) | cloudposse/stack-config/yaml//modules/remote-state | 1.0.0 | +| [accounts](#module\_accounts) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [atmos](#module\_atmos) | cloudposse/label/null | 0.25.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources @@ -72,40 +91,45 @@ components: | [local_file.account_info](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | | [aws_organizations_organization.organization](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/organizations_organization) | data source | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | +| [utils_describe_stacks.team_roles](https://registry.terraform.io/providers/cloudposse/utils/latest/docs/data-sources/describe_stacks) | data source | +| [utils_describe_stacks.teams](https://registry.terraform.io/providers/cloudposse/utils/latest/docs/data-sources/describe_stacks) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | -| [artifacts\_account\_account\_name](#input\_artifacts\_account\_account\_name) | The stage name for the artifacts account | `string` | `"artifacts"` | no | +| [artifacts\_account\_account\_name](#input\_artifacts\_account\_account\_name) | The short name for the artifacts account | `string` | `"artifacts"` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | -| [audit\_account\_account\_name](#input\_audit\_account\_account\_name) | The stage name for the audit account | `string` | `"audit"` | no | +| [audit\_account\_account\_name](#input\_audit\_account\_account\_name) | The short name for the audit account | `string` | `"audit"` | no | +| [aws\_config\_identity\_profile\_name](#input\_aws\_config\_identity\_profile\_name) | The AWS config profile name to use as `source_profile` for credentials. | `string` | `null` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | -| [dns\_account\_account\_name](#input\_dns\_account\_account\_name) | The stage name for the primary DNS account | `string` | `"dns"` | no | +| [dns\_account\_account\_name](#input\_dns\_account\_account\_name) | The short name for the primary DNS account | `string` | `"dns"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [global\_environment\_name](#input\_global\_environment\_name) | Global environment name | `string` | `"gbl"` | no | | [iam\_role\_arn\_template\_template](#input\_iam\_role\_arn\_template\_template) | The template for the template used to render Role ARNs.
The template is first used to render a template for the account that takes only the role name.
Then that rendered template is used to create the final Role ARN for the account.
Default is appropriate when using `tenant` and default label order with `null-label`.
Use `"arn:%s:iam::%s:role/%s-%s-%s-%%s"` when not using `tenant`.

Note that if the `null-label` variable `label_order` is truncated or extended with additional labels, this template will
need to be updated to reflect the new number of labels. | `string` | `"arn:%s:iam::%s:role/%s-%s-%s-%s-%%s"` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [identity\_account\_account\_name](#input\_identity\_account\_account\_name) | The stage name for the account holding primary IAM roles | `string` | `"identity"` | no | +| [identity\_account\_account\_name](#input\_identity\_account\_account\_name) | The short name for the account holding primary IAM roles | `string` | `"identity"` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [legacy\_terraform\_uses\_admin](#input\_legacy\_terraform\_uses\_admin) | If `true`, the legacy behavior of using the `admin` role rather than the `terraform` role in the
`root` and identity accounts will be preserved.
The default is to use the negation of the value of `terraform_dynamic_role_enabled`. | `bool` | `null` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | | [profile\_template](#input\_profile\_template) | The template used to render AWS Profile names.
Default is appropriate when using `tenant` and default label order with `null-label`.
Use `"%s-%s-%s-%s"` when not using `tenant`.

Note that if the `null-label` variable `label_order` is truncated or extended with additional labels, this template will
need to be updated to reflect the new number of labels. | `string` | `"%s-%s-%s-%s-%s"` | no | | [profiles\_enabled](#input\_profiles\_enabled) | Whether or not to enable profiles instead of roles for the backend. If true, profile must be set. If false, role\_arn must be set. | `bool` | `false` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | -| [root\_account\_account\_name](#input\_root\_account\_account\_name) | The stage name for the root account | `string` | `"root"` | no | +| [root\_account\_account\_name](#input\_root\_account\_account\_name) | The short name for the root account | `string` | `"root"` | no | | [root\_account\_aws\_name](#input\_root\_account\_aws\_name) | The name of the root account as reported by AWS | `string` | n/a | yes | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [terraform\_dynamic\_role\_enabled](#input\_terraform\_dynamic\_role\_enabled) | If true, the IAM role Terraform will assume will depend on the identity of the user running terraform | `bool` | `false` | no | +| [terraform\_role\_name\_map](#input\_terraform\_role\_name\_map) | Mapping of Terraform action (plan or apply) to aws-team-role name to assume for that action | `map(string)` |
{
"apply": "terraform",
"plan": "planner"
}
| no | ## Outputs @@ -116,13 +140,13 @@ components: | [artifacts\_account\_account\_name](#output\_artifacts\_account\_account\_name) | The short name for the artifacts account | | [audit\_account\_account\_name](#output\_audit\_account\_account\_name) | The short name for the audit account | | [aws\_partition](#output\_aws\_partition) | The AWS "partition" to use when constructing resource ARNs | -| [cicd\_profiles](#output\_cicd\_profiles) | A list of all SSO profiles used by cicd platforms | -| [cicd\_roles](#output\_cicd\_roles) | A list of all IAM roles used by cicd platforms | +| [cicd\_profiles](#output\_cicd\_profiles) | OBSOLETE: dummy results returned to avoid breaking code that depends on this output | +| [cicd\_roles](#output\_cicd\_roles) | OBSOLETE: dummy results returned to avoid breaking code that depends on this output | | [dns\_account\_account\_name](#output\_dns\_account\_account\_name) | The short name for the primary DNS account | | [eks\_accounts](#output\_eks\_accounts) | A list of all accounts in the AWS Organization that contain EKS clusters | | [full\_account\_map](#output\_full\_account\_map) | The map of account name to account ID (number). | -| [helm\_profiles](#output\_helm\_profiles) | A list of all SSO profiles used to run helm updates | -| [helm\_roles](#output\_helm\_roles) | A list of all IAM roles used to run helm updates | +| [helm\_profiles](#output\_helm\_profiles) | OBSOLETE: dummy results returned to avoid breaking code that depends on this output | +| [helm\_roles](#output\_helm\_roles) | OBSOLETE: dummy results returned to avoid breaking code that depends on this output | | [iam\_role\_arn\_templates](#output\_iam\_role\_arn\_templates) | Map of accounts to corresponding IAM Role ARN templates | | [identity\_account\_account\_name](#output\_identity\_account\_account\_name) | The short name for the account holding primary IAM roles | | [non\_eks\_accounts](#output\_non\_eks\_accounts) | A list of all accounts in the AWS Organization that do not contain EKS clusters | @@ -130,12 +154,17 @@ components: | [profiles\_enabled](#output\_profiles\_enabled) | Whether or not to enable profiles instead of roles for the backend | | [root\_account\_account\_name](#output\_root\_account\_account\_name) | The short name for the root account | | [root\_account\_aws\_name](#output\_root\_account\_aws\_name) | The name of the root account as reported by AWS | +| [terraform\_access\_map](#output\_terraform\_access\_map) | Mapping of team Role ARN to map of account name to terraform action role ARN to assume

For each team in `aws-teams`, look at every account and see if that team has access to the designated "apply" role.
If so, add an entry `<account-name> = "apply"` to the `terraform_access_map` entry for that team.
If not, see if it has access to the "plan" role, and if so, add a "plan" entry.
Otherwise, no entry is added. | +| [terraform\_dynamic\_role\_enabled](#output\_terraform\_dynamic\_role\_enabled) | True if dynamic role for Terraform is enabled | | [terraform\_profiles](#output\_terraform\_profiles) | A list of all SSO profiles used to run terraform updates | +| [terraform\_role\_name\_map](#output\_terraform\_role\_name\_map) | Mapping of Terraform action (plan or apply) to aws-team-role name to assume for that action | | [terraform\_roles](#output\_terraform\_roles) | A list of all IAM roles used to run terraform updates | + ## References -- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/account-map) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/account-map) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/account-map/account-info.tftmpl b/modules/account-map/account-info.tftmpl index 3c13181a7..2bc82fe76 100644 --- a/modules/account-map/account-info.tftmpl +++ b/modules/account-map/account-info.tftmpl @@ -2,7 +2,7 @@ # This script is automatically generated by `atmos terraform account-map`. # Do not modify this script directly. Instead, modify the template file. -# Path: modules/account-map/account-info.tftmpl +# Path: components/terraform/account-map/account-info.tftmpl # CAUTION: this script is appended to other scripts, # so it must not destroy variables like `functions`. @@ -16,6 +16,12 @@ function namespace() { echo ${namespace} } +functions+=("source-profile") +function source-profile() { + echo ${source_profile} +} + + declare -A accounts # root account included @@ -62,10 +68,11 @@ function _account-roles() { printf "%s\n" "$${!account_roles[@]}" | sort } function account-roles() { - printf "$${CONFIG_NAMESPACE:+$${CONFIG_NAMESPACE}-}%s\n" $(_account-roles) + for role in $(_account-roles); do + printf "$${CONFIG_NAMESPACE:+$${CONFIG_NAMESPACE}: }%s -> $${CONFIG_NAMESPACE:+$${CONFIG_NAMESPACE}-}%s\n" $role "$${account_roles[$role]}" + done } - ########### non-template helpers ########### functions+=("account-profile") diff --git a/modules/account-map/dynamic-roles.tf b/modules/account-map/dynamic-roles.tf new file mode 100644 index 000000000..fe0d6c993 --- /dev/null +++ b/modules/account-map/dynamic-roles.tf @@ -0,0 +1,116 @@ +# The `utils_describe_stacks` data resources use the Cloud Posse Utils provider to describe Atmos stacks, and then +# we merge the results into `local.all_team_vars`. This is the same as running the following locally: +# ``` +# atmos describe stacks --components=aws-teams,aws-team-roles --component-types=terraform --sections=vars +# ``` +# The result of these stack descriptions includes all metadata for the given components. For example, we now +# can filter the result to find all stacks where either `aws-teams` or `aws-team-roles` are deployed. +# +# In particular, we can use this data to find the name of the account via `null-label` (defined by +# `null-label.descriptor_formats.account_name`, typically `-`) where team roles are deployed. +# We then determine which roles are provisioned and which teams can access any given role in any particular account. 
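+# As a purely illustrative sketch (the namespace, team, and account names below are hypothetical, not taken from
+# any real configuration), the `role_arn_terraform_access` local computed at the end of this locals block, which
+# appears to back the `terraform_access_map` output documented in the README, ends up shaped roughly like:
+#
+#   {
+#     "arn:aws:iam::111111111111:role/acme-core-gbl-identity-devops" = {
+#       "core-auto" = "apply"
+#       "plat-dev"  = "plan"
+#     }
+#   }
+#
+# i.e. for each team role ARN, a map of account name to the Terraform action ("plan" or "apply") that team is
+# trusted to perform in that account.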
+# +# `descriptor_formats.account_name` is typically defined in `stacks/orgs/NAMESPACE/_defaults.yaml`, and if not +# defined, the stack name will default to `stage`.` +# +# If `namespace` is included in `descriptor_formats.account_name`, then we additionally filter to only stacks with +# the same `namespace` as `module.this.namespace`. See `local.stack_namespace_index` and `local.stack_namespace_index` +# +# https://atmos.tools/cli/commands/describe/stacks/ +# https://registry.terraform.io/providers/cloudposse/utils/latest/docs/data-sources/describe_stacks +data "utils_describe_stacks" "teams" { + count = local.dynamic_role_enabled ? 1 : 0 + + components = ["aws-teams"] + component_types = ["terraform"] + sections = ["vars"] +} + +data "utils_describe_stacks" "team_roles" { + count = local.dynamic_role_enabled ? 1 : 0 + + components = ["aws-team-roles"] + component_types = ["terraform"] + sections = ["vars"] +} + +locals { + dynamic_role_enabled = module.this.enabled && var.terraform_dynamic_role_enabled + + # `var.terraform_role_name_map` maps some team role in the `aws-team-roles` configuration to "plan" and some other team to "apply". + apply_role = var.terraform_role_name_map.apply + plan_role = var.terraform_role_name_map.plan + + # If a namespace is included with the stack name, only loop through stacks in the same namespace + # zero-based index showing position of the namespace in the stack name + stack_namespace_index = try(index(module.this.normalized_context.descriptor_formats.stack.labels, "namespace"), -1) + stack_has_namespace = local.stack_namespace_index >= 0 + stack_account_map = { for k, v in module.atmos : k => lookup(v.descriptors, "account_name", v.stage) } + + # We would like to use code like this: + # teams_stacks = local.dynamic_role_enabled ? { for k, v ... } : {} + # but that generates an error: "Inconsistent conditional result types" + # See https://github.com/hashicorp/terraform/issues/33303 + # To work around this, we have "empty" values that depend on the condition. + empty_map = { + true = null + false = {} + } + empty = local.empty_map[local.dynamic_role_enabled] + + # ASSUMPTIONS: The stack pattern is the same for all accounts and uses the same delimiter as null-label + teams_stacks = local.dynamic_role_enabled ? { + for k, v in yamldecode(data.utils_describe_stacks.teams[0].output) : k => v if !local.stack_has_namespace || try(split(module.this.delimiter, k)[local.stack_namespace_index] == module.this.namespace, false) + } : local.empty + + teams_vars = { for k, v in local.teams_stacks : k => v.components.terraform.aws-teams.vars if try(v.components.terraform.aws-teams.vars, null) != null } + teams_config = local.dynamic_role_enabled ? values(local.teams_vars)[0].teams_config : local.empty + team_names = [for k, v in local.teams_config : k if try(v.enabled, true)] + team_arns = { for team_name in local.team_names : team_name => format(local.iam_role_arn_templates[local.account_role_map.identity], team_name) } + + team_roles_stacks = local.dynamic_role_enabled ? 
{ + for k, v in yamldecode(data.utils_describe_stacks.team_roles[0].output) : k => v if !local.stack_has_namespace || try(split(module.this.delimiter, k)[local.stack_namespace_index] == module.this.namespace, false) + } : local.empty + + team_roles_vars = { for k, v in local.team_roles_stacks : k => v.components.terraform.aws-team-roles.vars if try(v.components.terraform.aws-team-roles.vars, null) != null } + + all_team_vars = merge(local.teams_vars, local.team_roles_vars) + + stack_planners = { for k, v in local.team_roles_vars : k => v.roles[local.plan_role].trusted_teams if try(length(v.roles[local.plan_role].trusted_teams), 0) > 0 && try(v.roles[local.plan_role].enabled, true) } + stack_terraformers = { for k, v in local.team_roles_vars : k => v.roles[local.apply_role].trusted_teams if try(length(v.roles[local.apply_role].trusted_teams), 0) > 0 && try(v.roles[local.apply_role].enabled, true) } + + team_planners = { for team in local.team_names : team => { + for stack, trusted in local.stack_planners : local.stack_account_map[stack] => "plan" if contains(trusted, team) + } } + team_terraformers = { for team in local.team_names : team => { + for stack, trusted in local.stack_terraformers : local.stack_account_map[stack] => "apply" if contains(trusted, team) + } } + + role_arn_terraform_access = { for team in local.team_names : local.team_arns[team] => merge(local.team_planners[team], local.team_terraformers[team]) } +} + +module "atmos" { + # local.all_team_vars is empty map when dynamic_role_enabled is false + for_each = local.all_team_vars + + source = "cloudposse/label/null" + version = "0.25.0" + + enabled = true + namespace = lookup(each.value, "namespace", null) + tenant = lookup(each.value, "tenant", null) + environment = lookup(each.value, "environment", null) + stage = lookup(each.value, "stage", null) + name = lookup(each.value, "name", null) + delimiter = lookup(each.value, "delimiter", null) + attributes = lookup(each.value, "attributes", []) + tags = lookup(each.value, "tags", {}) + additional_tag_map = lookup(each.value, "additional_tag_map", {}) + label_order = lookup(each.value, "label_order", []) + regex_replace_chars = lookup(each.value, "regex_replace_chars", null) + id_length_limit = lookup(each.value, "id_length_limit", null) + label_key_case = lookup(each.value, "label_key_case", null) + label_value_case = lookup(each.value, "label_value_case", null) + descriptor_formats = lookup(each.value, "descriptor_formats", {}) + labels_as_tags = lookup(each.value, "labels_as_tags", []) +} diff --git a/modules/account-map/main.tf b/modules/account-map/main.tf index a28e8a4b0..8431cebbb 100644 --- a/modules/account-map/main.tf +++ b/modules/account-map/main.tf @@ -3,11 +3,12 @@ data "aws_organizations_organization" "organization" {} data "aws_partition" "current" {} locals { - aws_partition = data.aws_partition.current.partition + aws_partition = data.aws_partition.current.partition + legacy_terraform_uses_admin = coalesce(var.legacy_terraform_uses_admin, !var.terraform_dynamic_role_enabled) full_account_map = { for acct in data.aws_organizations_organization.organization.accounts - : acct.name == var.root_account_aws_name ? var.root_account_account_name : acct.name => acct.id + : acct.name == var.root_account_aws_name ? 
var.root_account_account_name : acct.name => acct.id if acct.status != "SUSPENDED" } iam_role_arn_templates = { @@ -29,6 +30,11 @@ locals { all_accounts = concat(local.eks_accounts, local.non_eks_accounts) account_info_map = module.accounts.outputs.account_info_map + # Provide empty lists for deprecated outputs, to avoid breaking old code + # before it can be replaced. + empty_account_map = merge({ for name, info in local.account_info_map : name => "" }, { _OBSOLETE = "DUMMY RESULTS for backwards compatibility" }) + + # We should move this to be specified by tags on the accounts, # like we do with EKS, but for now.... account_role_map = { @@ -57,75 +63,29 @@ locals { terraform_roles = { - for name, info in local.account_info_map : name => - format(local.iam_role_arn_templates[name], - (contains([ - var.root_account_account_name, - var.identity_account_account_name - ], name) ? "admin" : "terraform") - ) - } - - terraform_profiles = { - for name, info in local.account_info_map : name => format(var.profile_template, compact( - [ - module.this.namespace, - lookup(info, "tenant", ""), - module.this.environment, - info.stage, - (contains([ + for name, info in local.account_info_map : name => format(local.iam_role_arn_templates[name], + (local.legacy_terraform_uses_admin && + contains([ var.root_account_account_name, var.identity_account_account_name - ], name) ? "admin" : "terraform") - ] - )...) - } - - helm_roles = { - for name, info in local.account_info_map : name => - format(local.iam_role_arn_templates[name], - (contains([ - var.root_account_account_name, - var.identity_account_account_name - ], name) ? "admin" : "helm") - ) - + ], name) + ) ? "admin" : "terraform") } - helm_profiles = { + # legacy support for `aws` config profiles + terraform_profiles = { for name, info in local.account_info_map : name => format(var.profile_template, compact( [ module.this.namespace, lookup(info, "tenant", ""), module.this.environment, info.stage, - (contains([ - var.root_account_account_name, - var.identity_account_account_name - ], name) ? "admin" : "helm") - ] - )...) - } - - cicd_roles = { - for name, info in local.account_info_map : name => - format(local.iam_role_arn_templates[name], - (contains([ - var.root_account_account_name - ], name) ? "admin" : "cicd") - ) - } - - cicd_profiles = { - for name, info in local.account_info_map : name => format(var.profile_template, compact( - [ - module.this.namespace, - lookup(info, "tenant", ""), - var.global_environment_name, - info.stage, - (contains([ - var.root_account_account_name - ], name) ? "admin" : "cicd") + ((local.legacy_terraform_uses_admin && + contains([ + var.root_account_account_name, + var.identity_account_account_name + ], name) + ) ? "admin" : "terraform"), ] )...) } diff --git a/modules/account-map/modules/iam-roles/README.md b/modules/account-map/modules/iam-roles/README.md index 984c9beaa..3fb46aa56 100644 --- a/modules/account-map/modules/iam-roles/README.md +++ b/modules/account-map/modules/iam-roles/README.md @@ -1,15 +1,14 @@ # Submodule `iam-roles` -This submodule is used by other modules to determine which IAM Roles -or AWS CLI Config Profiles to use for various tasks, most commonly -for applying Terraform plans. +This submodule is used by other modules to determine which IAM Roles or AWS CLI Config Profiles to use for various +tasks, most commonly for applying Terraform plans. 
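+For reference, a consuming component typically wires this submodule into its AWS provider roughly as follows. This
+is a sketch of the `providers.tf` pattern added for `access-analyzer` elsewhere in this change; adjust the relative
+`source` path to wherever `account-map` lives relative to your component.
+
+```hcl
+module "iam_roles" {
+  source  = "../account-map/modules/iam-roles"
+  context = module.this.context
+}
+
+provider "aws" {
+  region = var.region
+
+  # When AWS config profiles are in use, terraform_role_arn is null (and vice versa for terraform_profile_name).
+  profile = module.iam_roles.terraform_profile_name
+
+  dynamic "assume_role" {
+    # If terraform_role_arn is null, compact() drops it and no role is assumed.
+    for_each = compact([module.iam_roles.terraform_role_arn])
+    content {
+      role_arn = assume_role.value
+    }
+  }
+}
+```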
## Special Configuration Needed -In order to avoid having to pass customization information through every module -that uses this submodule, if the default configuration does not suit your needs, -you are expected to customize `variables.tf` with the defaults you want to -use in your project. For example, if you are including the `tenant` label -in the designation of your "root" account (your Organization Management Account), -then you should modify `variables.tf` so that `global_tenant_name` defaults -to the appropriate value. +In order to avoid having to pass customization information through every module that uses this submodule, if the default +configuration does not suit your needs, you are expected to add `variables_override.tf` to override the variables with +the defaults you want to use in your project. For example, if you are not using "core" as the `tenant` portion of your +"root" account (your Organization Management Account), then you should include the +`variable "overridable_global_tenant_name"` declaration in your `variables_override.tf` so that +`overridable_global_tenant_name` defaults to the value you are using (or the empty string if you are not using `tenant` +at all). diff --git a/modules/account-map/modules/iam-roles/main.tf b/modules/account-map/modules/iam-roles/main.tf index 57ffce386..3e69a1de3 100644 --- a/modules/account-map/modules/iam-roles/main.tf +++ b/modules/account-map/modules/iam-roles/main.tf @@ -1,3 +1,10 @@ + +data "awsutils_caller_identity" "current" { + count = local.dynamic_terraform_role_enabled ? 1 : 0 + # Avoid conflict with caller's provider which is using this module's output to assume a role. + provider = awsutils.iam-roles +} + module "always" { source = "cloudposse/label/null" version = "0.25.0" @@ -10,17 +17,68 @@ module "always" { module "account_map" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" + version = "1.5.0" component = "account-map" privileged = var.privileged - tenant = var.global_tenant_name - environment = var.global_environment_name - stage = var.global_stage_name + tenant = var.overridable_global_tenant_name + environment = var.overridable_global_environment_name + stage = var.overridable_global_stage_name context = module.always.context } locals { - account_name = lookup(module.always.descriptors, "account_name", module.always.stage) + profiles_enabled = coalesce(var.profiles_enabled, local.account_map.profiles_enabled) + + dynamic_terraform_role_enabled = try(local.account_map.terraform_dynamic_role_enabled, false) + + account_map = module.account_map.outputs + account_name = lookup(module.always.descriptors, "account_name", module.always.stage) + root_account_name = local.account_map.root_account_account_name + + current_user_role_arn = coalesce(one(data.awsutils_caller_identity.current[*].eks_role_arn), one(data.awsutils_caller_identity.current[*].arn), "disabled") + + current_identity_account = local.dynamic_terraform_role_enabled ? split(":", local.current_user_role_arn)[4] : "" + + terraform_access_map = try(local.account_map.terraform_access_map[local.current_user_role_arn], {}) + + is_root_user = local.current_identity_account == local.account_map.full_account_map[local.root_account_name] + is_target_user = local.current_identity_account == local.account_map.full_account_map[local.account_name] + + account_org_role_arns = { for name, id in local.account_map.full_account_map : name => + name == local.root_account_name ? 
null : format( + "arn:%s:iam::%s:role/OrganizationAccountAccessRole", local.account_map.aws_partition, id + ) + } + + static_terraform_roles = local.account_map.terraform_roles + + dynamic_terraform_role_maps = { + for account_name in local.account_map.all_accounts : account_name => { + apply = format(local.account_map.iam_role_arn_templates[account_name], local.account_map.terraform_role_name_map["apply"]) + plan = format(local.account_map.iam_role_arn_templates[account_name], local.account_map.terraform_role_name_map["plan"]) + # For user without explicit permissions: + # If the current user is a user in the `root` account, assume the `OrganizationAccountAccessRole` role in the target account. + # If the current user is a user in the target account, do not assume a role at all, let them do what their role allows. + # Otherwise, force them into the static Terraform role for the target account, + # to prevent users from accidentally running Terraform in the wrong account. + none = local.is_root_user ? local.account_org_role_arns[account_name] : ( + # null means use current user's role + local.is_target_user ? null : local.static_terraform_roles[account_name] + ) + } + } + + dynamic_terraform_role_types = { for account_name in local.account_map.all_accounts : + account_name => try(local.terraform_access_map[account_name], "none") + } + + dynamic_terraform_roles = { for account_name in local.account_map.all_accounts : + account_name => local.dynamic_terraform_role_maps[account_name][local.dynamic_terraform_role_types[account_name]] + } + + final_terraform_role_arns = { for account_name in local.account_map.all_accounts : account_name => + local.dynamic_terraform_role_enabled ? local.dynamic_terraform_roles[account_name] : local.static_terraform_roles[account_name] + } } diff --git a/modules/account-map/modules/iam-roles/outputs.tf b/modules/account-map/modules/iam-roles/outputs.tf index ff281d756..049380636 100644 --- a/modules/account-map/modules/iam-roles/outputs.tf +++ b/modules/account-map/modules/iam-roles/outputs.tf @@ -1,92 +1,87 @@ output "terraform_role_arn" { - value = module.account_map.outputs.terraform_roles[local.account_name] + value = local.profiles_enabled ? null : local.final_terraform_role_arns[local.account_name] description = "The AWS Role ARN for Terraform to use when provisioning resources in the account, when Role ARNs are in use" } output "terraform_role_arns" { - value = module.account_map.outputs.terraform_roles + value = local.account_map.terraform_roles description = "All of the terraform role arns" } output "terraform_profile_name" { - value = module.account_map.outputs.terraform_profiles[local.account_name] + value = local.profiles_enabled ? local.account_map.profiles[local.account_name] : null description = "The AWS config profile name for Terraform to use when provisioning resources in the account, when profiles are in use" } output "aws_partition" { - value = module.account_map.outputs.aws_partition + value = local.account_map.aws_partition description = "The AWS \"partition\" to use when constructing resource ARNs" } output "org_role_arn" { - value = local.account_name == module.account_map.outputs.root_account_account_name ? 
null : format( - "arn:%s:iam::%s:role/OrganizationAccountAccessRole", module.account_map.outputs.aws_partition, - module.account_map.outputs.full_account_map[local.account_name] - ) + value = local.account_org_role_arns[local.account_name] description = "The AWS Role ARN for Terraform to use when SuperAdmin is provisioning resources in the account" } output "global_tenant_name" { - value = var.global_tenant_name + value = var.overridable_global_tenant_name description = "The `null-label` `tenant` value used for organization-wide resources" } output "global_environment_name" { - value = var.global_environment_name + value = var.overridable_global_environment_name description = "The `null-label` `environment` value used for regionless (global) resources" } output "global_stage_name" { - value = var.global_stage_name + value = var.overridable_global_stage_name description = "The `null-label` `stage` value for the organization management account (where the `account-map` state is stored)" } +output "current_account_account_name" { + value = local.account_name + description = <<-EOT + The account name (usually `-`) for the account configured by this module's inputs. + Roughly analogous to `data "aws_caller_identity"`, but returning the name of the caller account as used in our configuration. + EOT +} + output "dns_terraform_role_arn" { - value = module.account_map.outputs.terraform_roles[module.account_map.outputs.dns_account_account_name] + value = local.profiles_enabled ? null : local.final_terraform_role_arns[local.account_map.dns_account_account_name] description = "The AWS Role ARN for Terraform to use to provision DNS Zone delegations, when Role ARNs are in use" } output "dns_terraform_profile_name" { - value = module.account_map.outputs.terraform_profiles[module.account_map.outputs.dns_account_account_name] + value = local.profiles_enabled ? local.account_map.terraform_profiles[local.account_map.dns_account_account_name] : null description = "The AWS config profile name for Terraform to use to provision DNS Zone delegations, when profiles are in use" } output "audit_terraform_role_arn" { - value = module.account_map.outputs.terraform_roles[module.account_map.outputs.audit_account_account_name] + value = local.profiles_enabled ? null : local.final_terraform_role_arns[local.account_map.audit_account_account_name] description = "The AWS Role ARN for Terraform to use to provision resources in the \"audit\" role account, when Role ARNs are in use" } output "audit_terraform_profile_name" { - value = module.account_map.outputs.terraform_profiles[module.account_map.outputs.audit_account_account_name] + value = local.profiles_enabled ? local.account_map.terraform_profiles[local.account_map.audit_account_account_name] : null description = "The AWS config profile name for Terraform to use to provision resources in the \"audit\" role account, when profiles are in use" } output "identity_account_account_name" { - value = module.account_map.outputs.identity_account_account_name + value = local.account_map.identity_account_account_name description = "The account name (usually `-`) for the account holding primary IAM roles" } output "identity_terraform_role_arn" { - value = module.account_map.outputs.terraform_roles[module.account_map.outputs.identity_account_account_name] + value = local.profiles_enabled ? 
null : local.final_terraform_role_arns[local.account_map.identity_account_account_name] description = "The AWS Role ARN for Terraform to use to provision resources in the \"identity\" role account, when Role ARNs are in use" } output "identity_terraform_profile_name" { - value = module.account_map.outputs.terraform_profiles[module.account_map.outputs.identity_account_account_name] + value = local.profiles_enabled ? local.account_map.terraform_profiles[local.account_map.identity_account_account_name] : null description = "The AWS config profile name for Terraform to use to provision resources in the \"identity\" role account, when profiles are in use" } -output "identity_cicd_role_arn" { - value = module.account_map.outputs.cicd_roles[module.account_map.outputs.identity_account_account_name] - description = "(Deprecated) The AWS Role ARN for CI/CD tools to assume to gain access to other accounts, when Role ARNs are in use" -} - -output "identity_cicd_profile_name" { - value = module.account_map.outputs.cicd_profiles[module.account_map.outputs.identity_account_account_name] - description = "(Deprecated) The AWS config profile name for CI/CD tools to assume to gain access to other accounts, when profiles are in use" -} - output "profiles_enabled" { - value = module.account_map.outputs.profiles_enabled + value = local.profiles_enabled description = "When true, use AWS config profiles in Terraform AWS provider configurations. When false, use Role ARNs." } diff --git a/modules/account-map/modules/iam-roles/providers.tf b/modules/account-map/modules/iam-roles/providers.tf new file mode 100644 index 000000000..55f8c188b --- /dev/null +++ b/modules/account-map/modules/iam-roles/providers.tf @@ -0,0 +1,11 @@ +provider "awsutils" { + # Components may want to use awsutils, and when they do, they typically want to use it in the assumed IAM role. + # That conflicts with this module's needs, so we create a separate provider alias for this module to use. + alias = "iam-roles" + + # If the provider block is empty, Terraform will output a deprecation warning, + # because earlier versions of Terraform used empty provider blocks to declare provider requirements, + # which is now deprecated in favor of the required_providers block. + # So we add a useless setting to the provider block to avoid the deprecation warning. + profile = null +} diff --git a/modules/account-map/modules/iam-roles/variables.tf b/modules/account-map/modules/iam-roles/variables.tf index a0247e566..247a08c38 100644 --- a/modules/account-map/modules/iam-roles/variables.tf +++ b/modules/account-map/modules/iam-roles/variables.tf @@ -1,23 +1,34 @@ variable "privileged" { type = bool - description = "True if the default provider already has access to the backend" + description = "True if the Terraform user already has access to the backend" default = false } -variable "global_tenant_name" { +variable "profiles_enabled" { + type = bool + description = "Whether or not to use profiles instead of roles for Terraform. Default (null) means to use global settings." + default = null +} + + +## The overridable_* variables in this file provide Cloud Posse defaults. +## Because this module is used in bootstrapping Terraform, we do not configure +## these inputs in the normal way. Instead, to change the values, you should +## add a `variables_override.tf` file and change the default to the value you want. 
+variable "overridable_global_tenant_name" { type = string description = "The tenant name used for organization-wide resources" - default = "gov" + default = "core" } -variable "global_environment_name" { +variable "overridable_global_environment_name" { type = string description = "Global environment name" default = "gbl" } -variable "global_stage_name" { +variable "overridable_global_stage_name" { type = string - description = "The stage name for the organization management account (where the `accout-map` state is stored)" + description = "The stage name for the organization management account (where the `account-map` state is stored)" default = "root" } diff --git a/modules/account-map/modules/iam-roles/versions.tf b/modules/account-map/modules/iam-roles/versions.tf new file mode 100644 index 000000000..e0cac65a2 --- /dev/null +++ b/modules/account-map/modules/iam-roles/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.2.0" + + required_providers { + awsutils = { + source = "cloudposse/awsutils" + version = ">= 0.16.0" + } + } +} diff --git a/modules/account-map/modules/roles-to-principals/README.md b/modules/account-map/modules/roles-to-principals/README.md index a24094074..65e45e000 100644 --- a/modules/account-map/modules/roles-to-principals/README.md +++ b/modules/account-map/modules/roles-to-principals/README.md @@ -1,16 +1,14 @@ # Submodule `roles-to-principals` -This submodule is used by other modules to map short role names and AWS -SSO Permission Set names in accounts designated by short account names -(for example, `terraform` in the `dev` account) to full IAM Role ARNs and -other related tasks. +This submodule is used by other modules to map short role names and AWS SSO Permission Set names in accounts designated +by short account names (for example, `terraform` in the `dev` account) to full IAM Role ARNs and other related tasks. ## Special Configuration Needed -In order to avoid having to pass customization information through every module -that uses this submodule, if the default configuration does not suit your needs, -you are expected to customize `variables.tf` with the defaults you want to -use in your project. For example, if you are including the `tenant` label -in the designation of your "root" account (your Organization Management Account), -then you should modify `variables.tf` so that `global_tenant_name` defaults -to the appropriate value. +As with `iam-roles`, in order to avoid having to pass customization information through every module that uses this +submodule, if the default configuration does not suit your needs, you are expected to add `variables_override.tf` to +override the variables with the defaults you want to use in your project. For example, if you are not using "core" as +the `tenant` portion of your "root" account (your Organization Management Account), then you should include the +`variable "overridable_global_tenant_name"` declaration in your `variables_override.tf` so that +`overridable_global_tenant_name` defaults to the value you are using (or the empty string if you are not using `tenant` +at all). 
diff --git a/modules/account-map/modules/roles-to-principals/main.tf b/modules/account-map/modules/roles-to-principals/main.tf index 31db906db..94aba013a 100644 --- a/modules/account-map/modules/roles-to-principals/main.tf +++ b/modules/account-map/modules/roles-to-principals/main.tf @@ -10,31 +10,49 @@ module "always" { module "account_map" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" + version = "1.5.0" component = "account-map" privileged = var.privileged - tenant = var.global_tenant_name - environment = var.global_environment_name - stage = var.global_stage_name + tenant = var.overridable_global_tenant_name + environment = var.overridable_global_environment_name + stage = var.overridable_global_stage_name context = module.always.context } locals { - aws_partition = module.account_map.outputs.aws_partition - - principals = distinct(compact(flatten([for acct, v in var.role_map : ( - contains(v, "*") ? [format("arn:%s:iam::%s:root", local.aws_partition, module.account_map.outputs.full_account_map[acct])] : - [ - for role in v : format(module.account_map.outputs.iam_role_arn_templates[acct], role) - ] - )]))) + aws_partition = module.account_map.outputs.aws_partition + team_ps_pattern = var.overridable_team_permission_set_name_pattern + identity_account_name = module.account_map.outputs.identity_account_account_name + teams_from_role_map = var.overridable_team_permission_sets_enabled ? try(var.role_map[local.identity_account_name], []) : [] + + team_permission_set_name_map = { + for team in distinct(concat(var.teams, local.teams_from_role_map)) : team => format(local.team_ps_pattern, replace(title(replace(team, "_", "-")), "-", "")) + } + permission_sets_from_team_roles = [for team in local.teams_from_role_map : local.team_permission_set_name_map[team]] + + principals_map = { for acct, v in var.role_map : acct => ( + contains(v, "*") ? { + "*" = format("arn:%s:iam::%s:root", local.aws_partition, module.account_map.outputs.full_account_map[acct]) + } : + { + for role in v : role => format(module.account_map.outputs.iam_role_arn_templates[acct], role) + } + ) } + + # This expression could be simplified, but then the order of principals would be different than in earlier versions, causing unnecessary plan changes. + principals = distinct(compact(flatten([for acct, v in var.role_map : values(local.principals_map[acct])]))) # Support for AWS SSO Permission Sets - permission_set_arn_like = distinct(compact(flatten([for acct, v in var.permission_set_map : formatlist( - # arn:aws:iam::550826706431:role/aws-reserved/sso.amazonaws.com/ap-southeast-1/AWSReservedSSO_IdentityAdminRoleAccess_b68e107e9495e2fc - # AWS SSO Sometimes includes `/region/`, but not always. + # We ensure that the identity account is included in the map so that we can add the permission sets from team roles to it. 
+ permission_set_arn_like = distinct(compact(flatten([for acct, v in merge({ (local.identity_account_name) = [] }, var.permission_set_map) : formatlist( + # Usually like: + # arn:aws:iam::123456789012:role/aws-reserved/sso.amazonaws.com/AWSReservedSSO_IdentityAdminRoleAccess_b68e107e9495e2fc + # But sometimes AWS SSO ARN includes `/region/`, like: + # arn:aws:iam::123456789012:role/aws-reserved/sso.amazonaws.com/ap-southeast-1/AWSReservedSSO_IdentityAdminRoleAccess_b68e107e9495e2fc + # If trust policies get too large, some space can be saved by using `*` instead of `aws-reserved/sso.amazonaws.com*` format("arn:%s:iam::%s:role/aws-reserved/sso.amazonaws.com*/AWSReservedSSO_%%s_*", local.aws_partition, module.account_map.outputs.full_account_map[acct]), - v)]))) + acct == local.identity_account_name ? distinct(concat(v, local.permission_sets_from_team_roles)) : v + )]))) } diff --git a/modules/account-map/modules/roles-to-principals/outputs.tf b/modules/account-map/modules/roles-to-principals/outputs.tf index 2ff84a8cf..461530a4d 100644 --- a/modules/account-map/modules/roles-to-principals/outputs.tf +++ b/modules/account-map/modules/roles-to-principals/outputs.tf @@ -1,6 +1,11 @@ output "principals" { value = local.principals - description = "List of AWS principals corresponding to given input `role_map`" + description = "Consolidated list of AWS principals corresponding to given input `role_map`" +} + +output "principals_map" { + value = local.principals_map + description = "Map of AWS principals corresponding to given input `role_map`" } output "permission_set_arn_like" { @@ -8,6 +13,11 @@ description = "List of Role ARN regexes suitable for IAM Condition `ArnLike` corresponding to given input `permission_set_map`" } +output "team_permission_set_name_map" { + value = local.team_permission_set_name_map + description = "Map of team names (from `var.teams` and `role_map[\"identity\"]`) to permission set names" +} + output "full_account_map" { value = module.account_map.outputs.full_account_map description = "Map of account names to account IDs" diff --git a/modules/account-map/modules/roles-to-principals/variables.tf b/modules/account-map/modules/roles-to-principals/variables.tf index eb0a06ce6..f942b2418 100644 --- a/modules/account-map/modules/roles-to-principals/variables.tf +++ b/modules/account-map/modules/roles-to-principals/variables.tf @@ -1,6 +1,7 @@ variable "role_map" { type = map(list(string)) description = "Map of account:[role, role...]. Use `*` as role for entire account" + default = {} } variable "permission_set_map" { @@ -9,26 +10,53 @@ default = {} } +variable "teams" { + type = list(string) + description = "List of team names to translate to AWS SSO PermissionSet names" + default = [] +} + variable "privileged" { type = bool description = "True if the default provider already has access to the backend" default = false } -variable "global_tenant_name" { +## The overridable_* variables in this file provide Cloud Posse defaults. +## Because this module is used in bootstrapping Terraform, we do not configure +## these inputs in the normal way. Instead, to change the values, you should +## add a `variables_override.tf` file and change the default to the value you want. 
+variable "overridable_global_tenant_name" { type = string description = "The tenant name used for organization-wide resources" default = "core" } -variable "global_environment_name" { +variable "overridable_global_environment_name" { type = string description = "Global environment name" default = "gbl" } -variable "global_stage_name" { +variable "overridable_global_stage_name" { type = string - description = "The stage name for the organization management account (where the `accout-map` state is stored)" + description = "The stage name for the organization management account (where the `account-map` state is stored)" default = "root" } + +variable "overridable_team_permission_set_name_pattern" { + type = string + description = "The pattern used to generate the AWS SSO PermissionSet name for each team" + default = "Identity%sTeamAccess" +} + +variable "overridable_team_permission_sets_enabled" { + type = bool + description = <<-EOT + When true, any roles (teams or team-roles) in the identity account references in `role_map` + will cause corresponding AWS SSO PermissionSets to be included in the `permission_set_arn_like` output. + This has the effect of treating those PermissionSets as if they were teams. + The main reason to set this `false` is if IAM trust policies are exceeding size limits and you are not using AWS SSO. + EOT + default = true +} diff --git a/modules/account-map/modules/team-assume-role-policy/README.md b/modules/account-map/modules/team-assume-role-policy/README.md index bc15cfdaa..f309bf33c 100644 --- a/modules/account-map/modules/team-assume-role-policy/README.md +++ b/modules/account-map/modules/team-assume-role-policy/README.md @@ -2,13 +2,14 @@ This submodule generates a JSON-encoded IAM Policy Document suitable for use as an "Assume Role Policy". -You can designate both who is allowed to assume a role and who is explicitly denied permission -to assume a role. The value of this submodule is that it allows for many ways -to specify the "who" while at the same time limiting the "who" to assumed IAM roles: +You can designate both who is allowed to assume a role and who is explicitly denied permission to assume a role. The +value of this submodule is that it allows for many ways to specify the "who" while at the same time limiting the "who" +to assumed IAM roles: - All assumed roles in the `dev` account: `allowed_roles = { dev = ["*"] }` - Only the `admin` role in the dev account: `allowed_roles = { dev = ["admin"] }` -- A specific principal in any account (though it must still be an assumed role): `allowed_principal_arns = arn:aws:iam::123456789012:role/trusted-role` +- A specific principal in any account (though it must still be an assumed role): + `allowed_principal_arns = arn:aws:iam::123456789012:role/trusted-role` - A user of a specific AWS SSO Permission Set: `allowed_permission_sets = { dev = ["DeveloperAccess"] }` ## Usage @@ -30,6 +31,7 @@ resource "aws_iam_role" "default" { } ``` + ## Requirements @@ -47,7 +49,7 @@ No requirements. 
|------|--------|---------| | [allowed\_role\_map](#module\_allowed\_role\_map) | ../../../account-map/modules/roles-to-principals | n/a | | [denied\_role\_map](#module\_denied\_role\_map) | ../../../account-map/modules/roles-to-principals | n/a | -| [github\_oidc\_provider](#module\_github\_oidc\_provider) | cloudposse/stack-config/yaml//modules/remote-state | 1.0.0 | +| [github\_oidc\_provider](#module\_github\_oidc\_provider) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources @@ -100,3 +102,4 @@ No requirements. | [github\_assume\_role\_policy](#output\_github\_assume\_role\_policy) | JSON encoded string representing the "Assume Role" policy configured by the inputs | | [policy\_document](#output\_policy\_document) | JSON encoded string representing the "Assume Role" policy configured by the inputs | + diff --git a/modules/account-map/modules/team-assume-role-policy/github-assume-role-policy.mixin.tf b/modules/account-map/modules/team-assume-role-policy/github-assume-role-policy.mixin.tf index 88fa9de73..04a63d3d2 100644 --- a/modules/account-map/modules/team-assume-role-policy/github-assume-role-policy.mixin.tf +++ b/modules/account-map/modules/team-assume-role-policy/github-assume-role-policy.mixin.tf @@ -25,6 +25,19 @@ locals { github_oidc_enabled = length(var.trusted_github_repos) > 0 } +locals { + trusted_github_repos_regexp = "^(?:(?P[^://]*)\\/)?(?P[^://]*):?(?P[^://]*)?$" + trusted_github_repos_sub = [for r in var.trusted_github_repos : regex(local.trusted_github_repos_regexp, r)] + + github_repos_sub = [ + for r in local.trusted_github_repos_sub : ( + r["branch"] == "" ? + format("repo:%s/%s:*", coalesce(r["org"], var.trusted_github_org), r["repo"]) : + format("repo:%s/%s:ref:refs/heads/%s", coalesce(r["org"], var.trusted_github_org), r["repo"], r["branch"]) + ) + ] +} + data "aws_iam_policy_document" "github_oidc_provider_assume" { count = local.github_oidc_enabled ? 1 : 0 @@ -32,6 +45,7 @@ data "aws_iam_policy_document" "github_oidc_provider_assume" { sid = "OidcProviderAssume" actions = [ "sts:AssumeRoleWithWebIdentity", + "sts:SetSourceIdentity", "sts:TagSession", ] @@ -51,7 +65,7 @@ data "aws_iam_policy_document" "github_oidc_provider_assume" { test = "StringLike" variable = "token.actions.githubusercontent.com:sub" - values = [for r in var.trusted_github_repos : "repo:${contains(split("", r), "/") ? r : "${var.trusted_github_org}/${r}"}:*"] + values = local.github_repos_sub } } } @@ -60,7 +74,7 @@ module "github_oidc_provider" { count = local.github_oidc_enabled ? 
1 : 0 source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.0.0" + version = "1.5.0" component = "github-oidc-provider" environment = var.global_environment_name diff --git a/modules/account-map/modules/team-assume-role-policy/main.tf b/modules/account-map/modules/team-assume-role-policy/main.tf index 6877cb30b..edf2ddbe4 100644 --- a/modules/account-map/modules/team-assume-role-policy/main.tf +++ b/modules/account-map/modules/team-assume-role-policy/main.tf @@ -1,21 +1,23 @@ locals { enabled = module.this.enabled - allowed_principals = sort(distinct(concat(var.allowed_principal_arns, module.allowed_role_map.principals, module.allowed_role_map.permission_set_arn_like))) + allowed_roles = concat(module.allowed_role_map.principals, module.allowed_role_map.permission_set_arn_like) + allowed_principals = sort(var.allowed_principal_arns) allowed_account_names = compact(concat( [for k, v in var.allowed_roles : k if length(v) > 0], [for k, v in var.allowed_permission_sets : k if length(v) > 0] )) - allowed_mapped_accounts = [for acct in local.allowed_account_names : module.allowed_role_map.full_account_map[acct]] - allowed_arn_accounts = data.aws_arn.allowed[*].account - allowed_accounts = sort(distinct(concat(local.allowed_mapped_accounts, local.allowed_arn_accounts))) + allowed_mapped_accounts = [for acct in local.allowed_account_names : module.allowed_role_map.full_account_map[acct]] + allowed_principals_accounts = data.aws_arn.allowed[*].account + # allowed_accounts = sort(distinct(concat(local.allowed_mapped_accounts, local.allowed_arn_accounts))) denied_principals = sort(distinct(concat(var.denied_principal_arns, module.denied_role_map.principals, module.denied_role_map.permission_set_arn_like))) denied_mapped_accounts = [for acct in concat(keys(var.denied_roles), keys(var.denied_permission_sets)) : module.denied_role_map.full_account_map[acct]] denied_arn_accounts = data.aws_arn.denied[*].account denied_accounts = sort(distinct(concat(local.denied_mapped_accounts, local.denied_arn_accounts))) - assume_role_enabled = (length(local.allowed_accounts) + length(local.denied_accounts)) > 0 + undenied_principals = sort(tolist(setsubtract(toset(local.allowed_principals), toset(local.denied_principals)))) + assume_role_enabled = (length(local.allowed_mapped_accounts) + length(local.allowed_principals_accounts) + length(local.denied_accounts)) > 0 aws_partition = module.allowed_role_map.aws_partition } @@ -57,7 +59,7 @@ data "aws_iam_policy_document" "assume_role" { count = local.enabled && local.assume_role_enabled ? 1 : 0 dynamic "statement" { - for_each = length(local.allowed_accounts) > 0 ? ["accounts"] : [] + for_each = length(local.allowed_mapped_accounts) > 0 && length(local.allowed_roles) > 0 ? ["accounts-roles"] : [] content { sid = "RoleAssumeRole" @@ -65,6 +67,7 @@ data "aws_iam_policy_document" "assume_role" { effect = "Allow" actions = [ "sts:AssumeRole", + "sts:SetSourceIdentity", "sts:TagSession", ] @@ -76,34 +79,73 @@ data "aws_iam_policy_document" "assume_role" { condition { test = "ArnLike" variable = "aws:PrincipalArn" - values = local.allowed_principals + values = local.allowed_roles } principals { type = "AWS" # Principals is a required field, so we allow any principal in any of the accounts, restricted by the assumed Role ARN in the condition clauses. # This allows us to allow non-existent (yet to be created) roles, which would not be allowed if directly specified in `principals`. 
- identifiers = formatlist("arn:${local.aws_partition}:iam::%s:root", local.allowed_accounts) + identifiers = formatlist("arn:${local.aws_partition}:iam::%s:root", local.allowed_mapped_accounts) } } } + dynamic "statement" { + for_each = length(local.allowed_principals_accounts) > 0 && length(local.allowed_principals) > 0 ? ["accounts-principals"] : [] + + content { + sid = "PrincipalAssumeRole" + + effect = "Allow" + actions = [ + "sts:AssumeRole", + "sts:SetSourceIdentity", + "sts:TagSession", + ] + + condition { + test = "ArnLike" + variable = "aws:PrincipalArn" + values = local.allowed_principals + } + + principals { + type = "AWS" + # Principals is a required field, so we allow any principal in any of the accounts, restricted by the assumed Role ARN in the condition clauses. + # This allows us to allow non-existent (yet to be created) roles, which would not be allowed if directly specified in `principals`. + identifiers = formatlist("arn:${local.aws_partition}:iam::%s:root", local.allowed_principals_accounts) + } + } + } # As a safety measure, we do not allow AWS Users (not Roles) to assume the SAML Teams or Team roles - # unless `deny_all_iam_users` is explicitly set to `false`. - # In particular, this prevents SuperAdmin from running Terraform on components that should be handled by Spacelift. + # unless `deny_all_iam_users` is explicitly set to `false` or the user is explicitly allowed. statement { sid = "RoleDenyAssumeRole" effect = "Deny" actions = [ "sts:AssumeRole", + "sts:SetSourceIdentity", "sts:TagSession", ] condition { test = "ArnLike" variable = "aws:PrincipalArn" - values = compact(concat(local.denied_principals, var.iam_users_enabled ? [] : ["arn:${local.aws_partition}:iam::*:user/*"])) + values = compact(concat(local.denied_principals, var.iam_users_enabled ? [] : [ + "arn:${local.aws_partition}:iam::*:user/*" + ])) + } + + dynamic "condition" { + for_each = length(local.undenied_principals) > 0 ? ["exceptions"] : [] + + content { + test = "ArnNotEquals" + variable = "aws:PrincipalArn" + values = local.undenied_principals + } } principals { @@ -112,7 +154,7 @@ data "aws_iam_policy_document" "assume_role" { # Principals is a required field, so we allow any principal in any of the accounts, restricted by the assumed Role ARN in the condition clauses. # This allows us to allow non-existent (yet to be created) roles, which would not be allowed if directly specified in `principals`. # We also deny all directly logged-in users from all the enabled accounts. 
- identifiers = formatlist("arn:${local.aws_partition}:iam::%s:root", sort(distinct(concat(local.denied_accounts, local.allowed_accounts)))) + identifiers = formatlist("arn:${local.aws_partition}:iam::%s:root", sort(distinct(concat(local.denied_accounts, local.allowed_mapped_accounts, local.allowed_principals_accounts)))) } } } diff --git a/modules/account-map/modules/team-assume-role-policy/outputs.tf b/modules/account-map/modules/team-assume-role-policy/outputs.tf index e9efb142a..2f4d7cd1b 100644 --- a/modules/account-map/modules/team-assume-role-policy/outputs.tf +++ b/modules/account-map/modules/team-assume-role-policy/outputs.tf @@ -1,4 +1,4 @@ output "policy_document" { description = "JSON encoded string representing the \"Assume Role\" policy configured by the inputs" - value = join("", data.aws_iam_policy_document.assume_role.*.json) + value = join("", data.aws_iam_policy_document.assume_role[*].json) } diff --git a/modules/account-map/outputs.tf b/modules/account-map/outputs.tf index 51f6855de..76dc133dd 100644 --- a/modules/account-map/outputs.tf +++ b/modules/account-map/outputs.tf @@ -81,29 +81,35 @@ output "terraform_profiles" { description = "A list of all SSO profiles used to run terraform updates" } -output "helm_roles" { - value = local.helm_roles - description = "A list of all IAM roles used to run helm updates" +output "profiles_enabled" { + value = var.profiles_enabled + description = "Whether or not to enable profiles instead of roles for the backend" } -output "helm_profiles" { - value = local.helm_profiles - description = "A list of all SSO profiles used to run helm updates" +output "terraform_dynamic_role_enabled" { + value = local.dynamic_role_enabled + description = "True if dynamic role for Terraform is enabled" + precondition { + condition = local.dynamic_role_enabled && var.profiles_enabled ? false : true + error_message = "Dynamic role for Terraform cannot be used with profiles. One of `terraform_dynamic_role_enabled` or `profiles_enabled` must be false." + } } -output "cicd_roles" { - value = local.cicd_roles - description = "A list of all IAM roles used by cicd platforms" -} +output "terraform_access_map" { + value = local.dynamic_role_enabled ? local.role_arn_terraform_access : null + description = <<-EOT + Mapping of team Role ARN to map of account name to terraform action role ARN to assume -output "cicd_profiles" { - value = local.cicd_profiles - description = "A list of all SSO profiles used by cicd platforms" + For each team in `aws-teams`, look at every account and see if that team has access to the designated "apply" role. + If so, add an entry ` = "apply"` to the `terraform_access_map` entry for that team. + If not, see if it has access to the "plan" role, and if so, add a "plan" entry. + Otherwise, no entry is added. + EOT } -output "profiles_enabled" { - value = var.profiles_enabled - description = "Whether or not to enable profiles instead of roles for the backend" +output "terraform_role_name_map" { + value = local.dynamic_role_enabled ? 
var.terraform_role_name_map : null + description = "Mapping of Terraform action (plan or apply) to aws-team-role name to assume for that action" } resource "local_file" "account_info" { @@ -112,6 +118,41 @@ resource "local_file" "account_info" { account_profiles = local.account_profiles account_role_map = local.account_role_map namespace = module.this.namespace + source_profile = coalesce(var.aws_config_identity_profile_name, format("%s-identity", module.this.namespace)) }) filename = "${path.module}/account-info/${module.this.id}.sh" } + + +###################### +## Deprecated outputs +## These outputs are deprecated and will be removed in a future release +## As of this release, they return empty lists so as not to break old +## versions of account-map/modules/iam-roles and imposing an order +## on deploying new code vs applying the updated account-map +###################### + +output "helm_roles" { + value = local.empty_account_map + description = "OBSOLETE: dummy results returned to avoid breaking code that depends on this output" +} + +output "helm_profiles" { + value = local.empty_account_map + description = "OBSOLETE: dummy results returned to avoid breaking code that depends on this output" +} + +output "cicd_roles" { + value = local.empty_account_map + description = "OBSOLETE: dummy results returned to avoid breaking code that depends on this output" +} + +output "cicd_profiles" { + value = local.empty_account_map + description = "OBSOLETE: dummy results returned to avoid breaking code that depends on this output" +} + +###################### +## End of Deprecated outputs +## Please add new outputs above this section +###################### diff --git a/modules/account-map/remote-state.tf b/modules/account-map/remote-state.tf index a70b8025a..c9dfaa884 100644 --- a/modules/account-map/remote-state.tf +++ b/modules/account-map/remote-state.tf @@ -1,6 +1,6 @@ module "accounts" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.0.0" + version = "1.5.0" component = "account" privileged = true diff --git a/modules/account-map/variables.tf b/modules/account-map/variables.tf index 4eb1cfd62..7043247bd 100644 --- a/modules/account-map/variables.tf +++ b/modules/account-map/variables.tf @@ -11,31 +11,31 @@ variable "root_account_aws_name" { variable "root_account_account_name" { type = string default = "root" - description = "The stage name for the root account" + description = "The short name for the root account" } variable "identity_account_account_name" { type = string default = "identity" - description = "The stage name for the account holding primary IAM roles" + description = "The short name for the account holding primary IAM roles" } variable "dns_account_account_name" { type = string default = "dns" - description = "The stage name for the primary DNS account" + description = "The short name for the primary DNS account" } variable "artifacts_account_account_name" { type = string default = "artifacts" - description = "The stage name for the artifacts account" + description = "The short name for the artifacts account" } variable "audit_account_account_name" { type = string default = "audit" - description = "The stage name for the audit account" + description = "The short name for the audit account" } variable "iam_role_arn_template_template" { @@ -66,14 +66,39 @@ variable "profile_template" { EOT } -variable "global_environment_name" { +variable "profiles_enabled" { + type = bool + default = false + description = "Whether or not to enable profiles 
instead of roles for the backend. If true, profile must be set. If false, role_arn must be set." +} + +variable "aws_config_identity_profile_name" { type = string - default = "gbl" - description = "Global environment name" + default = null + description = "The AWS config profile name to use as `source_profile` for credentials." } -variable "profiles_enabled" { +variable "terraform_role_name_map" { + type = map(string) + description = "Mapping of Terraform action (plan or apply) to aws-team-role name to assume for that action" + default = { + plan = "planner" + apply = "terraform" + } +} + +variable "legacy_terraform_uses_admin" { type = bool + description = <<-EOT + If `true`, the legacy behavior of using the `admin` role rather than the `terraform` role in the + `root` and identity accounts will be preserved. + The default is to use the negations of the value of `terraform_dynamic_role_enabled`. + EOT + default = null +} + +variable "terraform_dynamic_role_enabled" { + type = bool + description = "If true, the IAM role Terraform will assume will depend on the identity of the user running terraform" default = false - description = "Whether or not to enable profiles instead of roles for the backend. If true, profile must be set. If false, role_arn must be set." } diff --git a/modules/account-map/versions.tf b/modules/account-map/versions.tf index cc73ffd35..98fe82089 100644 --- a/modules/account-map/versions.tf +++ b/modules/account-map/versions.tf @@ -1,10 +1,18 @@ terraform { - required_version = ">= 1.0.0" + required_version = ">= 1.2.0" required_providers { aws = { source = "hashicorp/aws" version = ">= 4.9.0" } + local = { + source = "hashicorp/local" + version = ">= 1.3" + } + utils = { + source = "cloudposse/utils" + version = ">= 1.10.0" + } } } diff --git a/modules/account-quotas/README.md b/modules/account-quotas/README.md index 6777f07b1..f17dd7aec 100644 --- a/modules/account-quotas/README.md +++ b/modules/account-quotas/README.md @@ -1,19 +1,26 @@ -# Component: `account-quotas` +--- +tags: + - component/account-quotas + - layer/foundation + - provider/aws +--- -This component is responsible for requesting service quota increases. We recommend -making requests here rather than in `account-settings` because `account-settings` -is a restricted component that can only be applied by SuperAdmin. +# Component: `account-quotas` +This component is responsible for requesting service quota increases. We recommend making requests here rather than in +`account-settings` because `account-settings` is a restricted component that can only be applied by SuperAdmin. ## Usage **Stack Level**: Global and Regional (depending on quota) -Global resources must be provisioned in `us-east-1`. Put them in the `gbl` stack, but set `region: us-east-1` in the `vars` section. +Global resources must be provisioned in `us-east-1`. Put them in the `gbl` stack, but set `region: us-east-1` in the +`vars` section. -You can refer to services either by their exact full name (e.g. `service_name: "Amazon Elastic Compute Cloud (Amazon EC2)"`) or by the -service code (e.g. `service_code: "ec2"`). Similarly, you can refer to quota names either by their exact full name -(e.g. `quota_name: "EC2-VPC Elastic IPs"`) or by the quota code (e.g. `quota_code: "L-0263D0A3"`). +You can refer to services either by their exact full name (e.g. +`service_name: "Amazon Elastic Compute Cloud (Amazon EC2)"`) or by the service code (e.g. `service_code: "ec2"`). +Similarly, you can refer to quota names either by their exact full name (e.g. 
`quota_name: "EC2-VPC Elastic IPs"`) or by +the quota code (e.g. `quota_code: "L-0263D0A3"`). You can find service codes and full names via the AWS CLI (be sure to use the correct region): @@ -21,17 +28,30 @@ You can find service codes and full names via the AWS CLI (be sure to use the co aws --region us-east-1 service-quotas list-services ``` -You can find quota codes and full names, and also whether the quotas are adjustable or global, via the AWS CLI, -but you will need the service code from the previous step: +You can find quota codes and full names, and also whether the quotas are adjustable or global, via the AWS CLI, but you +will need the service code from the previous step: ```bash aws --region us-east-1 service-quotas list-service-quotas --service-code ec2 ``` -If you make a request to raise a quota, the output will show the requested value as `value` while the request is pending. +If you make a request to raise a quota, the output will show the requested value as `value` while the request is +pending. + +### Special usage Notes + +Even though the Terraform will submit the support request, you may need to follow up with AWS support to get the request +approved, via the AWS console or email. + +#### Resources are destroyed on change + +Because the AWS API often returns default values rather than configured or applicable values for a given quota, we have +to ignore the value returned by the API or else face perpetual drift. To allow us to change the value in the future, +even though we are ignoring it, we encode the value in the resource key, so that a change of value will result in a new +resource being created and the old one being destroyed. Destroying the old resource has no actual effect (it does not +even close an open request), so it is safe to do. -Even though the Terraform will submit the support request, you may need to follow up with AWS support to get the request approved, -via the AWS console or email. +### Example Here's an example snippet for how to use this component. @@ -51,6 +71,7 @@ components: value: 10 ``` + ## Requirements @@ -92,8 +113,6 @@ components: | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -113,10 +132,18 @@ components: |------|-------------| | [quotas](#output\_quotas) | Full report on all service quotas managed by this component. | + ## References - [AWS Service Quotas](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) -- AWS CLI [command to list service codes](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/service-quotas/list-services.html): `aws service-quotas list-services` +- AWS CLI + [command to list service codes](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/service-quotas/list-services.html): + `aws service-quotas list-services` +- AWS CLI + [command to list service quotas](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/service-quotas/list-service-quotas.html) + `aws service-quotas list-service-quotas`. Note where it says "For some quotas, only the default values are available." +- [Medium article](https://medium.com/@jsonk/the-limit-does-not-exist-hidden-visibility-of-aws-service-limits-4b786f846bc0) + explaining how many AWS service limits are not available. [](https://cpco.io/component) diff --git a/modules/account-quotas/main.tf b/modules/account-quotas/main.tf index 0305efe81..1e40992c8 100644 --- a/modules/account-quotas/main.tf +++ b/modules/account-quotas/main.tf @@ -21,6 +21,26 @@ locals { quota_code = quota.quota_code != null ? quota.quota_code : data.aws_servicequotas_service_quota.by_name[k].quota_code value = quota.value } } + + # Because the API often returns default values rather than configured or applicable values, + # we have to ignore the value returned by the API or else face perpetual drift. + # To allow us to change the value in the future, even though we are ignoring it, + # we encode the value in the resource key, so that a change of value will + # result in a new resource being created and the old one being destroyed. + # Destroying the old resource has no actual effect, it does not even close + # an open request, so it is safe to do. + + quota_requests = { for k, quota in local.quotas_coded_map : + format("%v/%v/%v", quota.service_code, quota.quota_code, quota.value) => merge( + quota, { input_map_key = k } + ) + } + + quota_results = { for k, v in local.quota_requests : v.input_map_key => merge( + { for k, v in aws_servicequotas_service_quota.this[k] : k => v if k != "value" }, + { "value reported (may be inaccurate)" = aws_servicequotas_service_quota.this[k].value }, + { "value requested" = v.value } + ) } } data "aws_servicequotas_service" "by_name" { @@ -37,9 +57,15 @@ data "aws_servicequotas_service_quota" "by_name" { } resource "aws_servicequotas_service_quota" "this" { - for_each = local.quotas_coded_map + for_each = local.quota_requests quota_code = each.value.quota_code service_code = each.value.service_code value = each.value.value + + lifecycle { + # Literally about 50% of the time, the actual value set is not available, + # so the default value is reported instead, resulting in permanent drift. + ignore_changes = [value] + } } diff --git a/modules/account-quotas/outputs.tf b/modules/account-quotas/outputs.tf index 6258c97f3..48cd0feda 100644 --- a/modules/account-quotas/outputs.tf +++ b/modules/account-quotas/outputs.tf @@ -1,4 +1,4 @@ output "quotas" { - value = aws_servicequotas_service_quota.this + value = local.quota_results description = "Full report on all service quotas managed by this component." 
} diff --git a/modules/account-quotas/providers.tf b/modules/account-quotas/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/account-quotas/providers.tf +++ b/modules/account-quotas/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/account-settings/README.md b/modules/account-settings/README.md index 48206f889..e5fee8198 100644 --- a/modules/account-settings/README.md +++ b/modules/account-settings/README.md @@ -1,14 +1,23 @@ +--- +tags: + - component/account-settings + - layer/accounts + - provider/aws + - privileged +--- + # Component: `account-settings` -This component is responsible for provisioning account level settings: IAM password policy, AWS Account Alias, EBS encryption, and Service Quotas. +This component is responsible for provisioning account level settings: IAM password policy, AWS Account Alias, EBS +encryption, and Service Quotas. ## Usage **Stack Level**: Global -Here's an example snippet for how to use this component. It's suggested to apply this component to all accounts, -so create a file `stacks/catalog/account-settings.yaml` with the following content and then import -that file in each account's global stack (overriding any parameters as needed): +Here's an example snippet for how to use this component. It's suggested to apply this component to all accounts, so +create a file `stacks/catalog/account-settings.yaml` with the following content and then import that file in each +account's global stack (overriding any parameters as needed): ```yaml components: @@ -34,6 +43,27 @@ components: limit_amount: "3" limit_unit: GB time_unit: MONTHLY + notification: + - comparison_operator: GREATER_THAN + notification_type: FORECASTED + threshold_type: PERCENTAGE + threshold: 80 + subscribers: + - slack + - comparison_operator: GREATER_THAN + notification_type: FORECASTED + threshold_type: PERCENTAGE + # We generate two forecast notifications. This makes sure that notice is taken, + # and hopefully action can be taken to prevent going over budget. 
+ threshold: 100 + subscribers: + - slack + - comparison_operator: GREATER_THAN + notification_type: ACTUAL + threshold_type: PERCENTAGE + threshold: 100 + subscribers: + - slack service_quotas_enabled: true service_quotas: - quota_name: Subnets per VPC @@ -47,6 +77,7 @@ components: value: null ``` + ## Requirements @@ -65,8 +96,8 @@ components: | Name | Source | Version | |------|--------|---------| -| [budgets](#module\_budgets) | cloudposse/budgets/aws | 0.1.0 | -| [iam\_account\_settings](#module\_iam\_account\_settings) | cloudposse/iam-account-settings/aws | 0.4.0 | +| [budgets](#module\_budgets) | cloudposse/budgets/aws | 0.2.1 | +| [iam\_account\_settings](#module\_iam\_account\_settings) | cloudposse/iam-account-settings/aws | 0.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [service\_quotas](#module\_service\_quotas) | cloudposse/service-quotas/aws | 0.1.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -95,8 +126,6 @@ components: | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -107,7 +136,6 @@ components: | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | -| [root\_account\_tenant\_name](#input\_root\_account\_tenant\_name) | The tenant name for the root account | `string` | `null` | no | | [service\_quotas](#input\_service\_quotas) | A list of service quotas to manage or lookup.
To lookup the value of a service quota, set `value = null` and either `quota_code` or `quota_name`.
To manage a service quota, set `value` to a number. Service Quotas can only be managed via `quota_code`.
For a more specific example, see https://github.com/cloudposse/terraform-aws-service-quotas/blob/master/examples/complete/fixtures.us-east-2.tfvars. | `list(any)` | `[]` | no | | [service\_quotas\_enabled](#input\_service\_quotas\_enabled) | Whether or not this component should handle Service Quotas | `bool` | `false` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | @@ -120,8 +148,11 @@ components: |------|-------------| | [account\_alias](#output\_account\_alias) | Account alias | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/account-settings) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/account-settings) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/account-settings/budgets.tf b/modules/account-settings/budgets.tf index 29066cb7b..2a4093713 100644 --- a/modules/account-settings/budgets.tf +++ b/modules/account-settings/budgets.tf @@ -1,6 +1,6 @@ module "budgets" { source = "cloudposse/budgets/aws" - version = "0.1.0" + version = "0.2.1" enabled = module.this.enabled && var.budgets_enabled budgets = var.budgets diff --git a/modules/account-settings/main.tf b/modules/account-settings/main.tf index 2ee5cdabe..34e392613 100644 --- a/modules/account-settings/main.tf +++ b/modules/account-settings/main.tf @@ -8,7 +8,7 @@ resource "aws_ebs_encryption_by_default" "default" { # It also sets the account alias for the current account. module "iam_account_settings" { source = "cloudposse/iam-account-settings/aws" - version = "0.4.0" + version = "0.5.0" hard_expiry = true minimum_password_length = var.minimum_password_length diff --git a/modules/account-settings/providers.tf b/modules/account-settings/providers.tf index 54f0d0f04..ef923e10a 100644 --- a/modules/account-settings/providers.tf +++ b/modules/account-settings/providers.tf @@ -1,37 +1,19 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = var.import_role_arn == null ? (module.iam_roles.org_role_arn != null ? [true] : []) : ["import"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.org_role_arn) + role_arn = assume_role.value } } } module "iam_roles" { - source = "../account-map/modules/iam-roles" - privileged = true - global_tenant_name = var.root_account_tenant_name - context = module.this.context -} - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} - -variable "root_account_tenant_name" { - type = string - description = "The tenant name for the root account" - default = null + source = "../account-map/modules/iam-roles" + context = module.this.context } diff --git a/modules/account/README.md b/modules/account/README.md index 7fa7d663e..f3915df72 100644 --- a/modules/account/README.md +++ b/modules/account/README.md @@ -1,108 +1,400 @@ +--- +tags: + - component/account + - layer/accounts + - provider/aws + - privileged +--- + # Component: `account` -This component is responsible for provisioning the full account hierarchy along with Organizational Units (OUs). It includes the ability to associate Service Control Policies (SCPs) to the Organization, each Organizational Unit and account. +This component is responsible for provisioning the full account hierarchy along with Organizational Units (OUs). It +includes the ability to associate Service Control Policies (SCPs) to the Organization, each Organizational Unit and +account. + +> [!NOTE] +> +> Part of a [cold start](https://docs.cloudposse.com/layers/accounts/prepare-aws-organization/) so it has to be +> initially run with `SuperAdmin` role. -In addition, it enables [AWS IAM Access Analyzer](https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html), which helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. Access Analyzer identifies resources that are shared with external principals by using logic-based reasoning to analyze the resource-based policies in your AWS environment. For each instance of a resource that is shared outside of your account, Access Analyzer generates a finding. Findings include information about the access and the external principal that it is granted to. You can review findings to determine whether the access is intended and safe, or the access is unintended and a security risk. +In addition, it enables +[AWS IAM Access Analyzer](https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html), which helps +you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared +with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. +Access Analyzer identifies resources that are shared with external principals by using logic-based reasoning to analyze +the resource-based policies in your AWS environment. For each instance of a resource that is shared outside of your +account, Access Analyzer generates a finding. Findings include information about the access and the external principal +that it is granted to. 
You can review findings to determine whether the access is intended and safe, or the access is +unintended and a security risk. ## Usage **Stack Level**: Global -**IMPORTANT**: Account names must not contain dashes. Doing so will lead to unpredictable resource names as a `-` is the default delimiter. Additionally, account names must be lower case alpha-numeric with no special characters. +**IMPORTANT**: Account Name building blocks (such as tenant, stage, environment) must not contain dashes. Doing so will +lead to unpredictable resource names as a `-` is the default delimiter. Additionally, account names must be lower case +alphanumeric with no special characters. For example: -Here is an example snippet for how to use this component. Include this snippet in the stack configuration for the management account -(typically `root`) in the management tenant/OU (usually something like `mgmt` or `core`) in the global region (`gbl`). You can insert -the content directly, or create a `stacks/catalog/account.yaml` file and import it from there. +| Key | Value | Correctness | +| ---------------- | --------------- | ----------- | +| **Tenant** | foo | βœ… | +| **Tenant** | foo-bar | ❌ | +| **Environment** | use1 | βœ… | +| **Environment** | us-east-1 | ❌ | +| **Account Name** | `core-identity` | βœ… | + +Here is an example snippet for how to use this component. Include this snippet in the stack configuration for the +management account (typically `root`) in the management tenant/OU (usually something like `mgmt` or `core`) in the +global region (`gbl`). You can insert the content directly, or create a `stacks/catalog/account.yaml` file and import it +from there. ```yaml components: terraform: account: + settings: + spacelift: + workspace_enabled: false + backend: + s3: + role_arn: null vars: + enabled: true account_email_format: aws+%s@example.net - account_iam_user_access_to_billing: DENY + account_iam_user_access_to_billing: ALLOW organization_enabled: true aws_service_access_principals: - cloudtrail.amazonaws.com + - guardduty.amazonaws.com + - ipam.amazonaws.com - ram.amazonaws.com + - securityhub.amazonaws.com + - servicequotas.amazonaws.com + - sso.amazonaws.com + - securityhub.amazonaws.com + - auditmanager.amazonaws.com enabled_policy_types: - SERVICE_CONTROL_POLICY - TAG_POLICY organization_config: - root_account_stage_name: root + root_account: + name: core-root + stage: root + tenant: core + tags: + eks: false accounts: [] organization: - service_control_policies: [] + service_control_policies: + - DenyEC2InstancesWithoutEncryptionInTransit organizational_units: - - name: data + - name: core accounts: - - name: proddata + - name: core-artifacts + tenant: core + stage: artifacts tags: - eks: true - - name: devdata + eks: false + - name: core-audit + tenant: core + stage: audit tags: - eks: true - - name: stagedata + eks: false + - name: core-auto + tenant: core + stage: auto tags: eks: true - service_control_policies: - - DenyLeavingOrganization - - name: platform - accounts: - - name: prodplatform + - name: core-corp + tenant: core + stage: corp tags: eks: true - - name: devplatform + - name: core-dns + tenant: core + stage: dns tags: - eks: true - - name: stageplatform + eks: false + - name: core-identity + tenant: core + stage: identity tags: - eks: true + eks: false + - name: core-network + tenant: core + stage: network + tags: + eks: false + - name: core-security + tenant: core + stage: security + tags: + eks: false service_control_policies: - DenyLeavingOrganization - - name: mgmt + - 
name: plat accounts: - - name: demo + - name: plat-dev + tenant: plat + stage: dev tags: eks: true - - name: audit - tags: - eks: false - - name: corp + - name: plat-sandbox + tenant: plat + stage: sandbox tags: eks: true - - name: security - tags: - eks: false - - name: identity + - name: plat-staging + tenant: plat + stage: staging tags: - eks: false - - name: network - tags: - eks: false - - name: dns - tags: - eks: false - - name: automation + eks: true + - name: plat-prod + tenant: plat + stage: prod tags: eks: true service_control_policies: - DenyLeavingOrganization service_control_policies_config_paths: - # These paths specify where to find the service control policies identified by SID in the service_control_policies sections above. - # The number such as "0.12.0" is the release number/tag of the service control policies repository, and you may want to - # update it to reflect the latest release. - - "https://raw.githubusercontent.com/cloudposse/terraform-aws-service-control-policies/0.12.0/catalog/organization-policies.yaml" - - "https://raw.githubusercontent.com/cloudposse/terraform-aws-service-control-policies/0.12.0/catalog/ec2-policies.yaml" - - "https://raw.githubusercontent.com/cloudposse/terraform-aws-service-control-policies/0.12.0/catalog/cloudwatch-logs-policies.yaml" - - "https://raw.githubusercontent.com/cloudposse/terraform-aws-service-control-policies/0.12.0/catalog/deny-all-policies.yaml" - - "https://raw.githubusercontent.com/cloudposse/terraform-aws-service-control-policies/0.12.0/catalog/iam-policies.yaml" - - "https://raw.githubusercontent.com/cloudposse/terraform-aws-service-control-policies/0.12.0/catalog/kms-policies.yaml" - - "https://raw.githubusercontent.com/cloudposse/terraform-aws-service-control-policies/0.12.0/catalog/route53-policies.yaml" - - "https://raw.githubusercontent.com/cloudposse/terraform-aws-service-control-policies/0.12.0/catalog/s3-policies.yaml" + # These paths specify where to find the service control policies identified by SID in the service_control_policies sections above. + - "https://raw.githubusercontent.com/cloudposse/terraform-aws-service-control-policies/0.12.0/catalog/cloudwatch-logs-policies.yaml" + - "https://raw.githubusercontent.com/cloudposse/terraform-aws-service-control-policies/0.12.0/catalog/deny-all-policies.yaml" + - "https://raw.githubusercontent.com/cloudposse/terraform-aws-service-control-policies/0.12.0/catalog/iam-policies.yaml" + - "https://raw.githubusercontent.com/cloudposse/terraform-aws-service-control-policies/0.12.0/catalog/kms-policies.yaml" + - "https://raw.githubusercontent.com/cloudposse/terraform-aws-service-control-policies/0.12.0/catalog/organization-policies.yaml" + - "https://raw.githubusercontent.com/cloudposse/terraform-aws-service-control-policies/0.12.0/catalog/route53-policies.yaml" + - "https://raw.githubusercontent.com/cloudposse/terraform-aws-service-control-policies/0.12.0/catalog/s3-policies.yaml" + - "https://raw.githubusercontent.com/cloudposse/terraform-aws-service-control-policies/0.12.0/catalog/ec2-policies.yaml" +``` + +## First Time Organization Setup + +Your AWS Organization is managed by the `account` component, along with accounts and organizational units. + +However, because the AWS defaults for an Organization and its accounts are not exactly what we want, and there is no way +to change them via Terraform, we have to first provision the AWS Organization, then take some steps on the AWS console, +and then we can provision the rest. 
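
Most of the console work described below exists to raise the AWS Organizations account limit and to enable resource sharing. If you prefer to script the quota request instead of clicking through the console, a minimal sketch follows; the quota code `L-E619E033` is an assumption and should be confirmed first by listing the quotas for the `organizations` service.

```
# Confirm the quota code for "Default maximum number of accounts" (assumed to be L-E619E033)
aws service-quotas list-service-quotas --service-code organizations --region us-east-1

# Request an increase to 20 accounts
aws service-quotas request-service-quota-increase \
  --service-code organizations \
  --quota-code L-E619E033 \
  --desired-value 20 \
  --region us-east-1
```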
+ +### Use AWS Console to create and set up the Organization + +Unfortunately, there are some tasks that need to be done via the console. Log into the AWS Console with the root (not +SuperAdmin) credentials you have saved in 1Password. + +#### Request an increase in the maximum number of accounts allowed + +> [!WARNING] +> +> Make sure your support plan for the _root_ account was upgraded to the "Business" level (or Higher). This is necessary +> to expedite the quota increase requests, which could take several days on a basic support plan. Without it, AWS +> support will claim that since we’re not currently utilizing any of the resources, so they do not want to approve the +> requests. AWS support is not aware of your other organization. If AWS still gives you problems, please escalate to +> your AWS TAM. + +1. From the region list, select "US East (N. Virginia) us-east-1". + +2. From the account dropdown menu, select "My Service Quotas". + +3. From the Sidebar, select "AWS Services". + +4. Type "org" in the search field under "AWS services" + +5. Click on "AWS Organizations" in the "Service" list + +6. Click on "Default maximum number of accounts", which should take you to a new view + +7. Click on "Request quota increase" on the right side of the view, which should pop us a request form + +8. At the bottom of the form, under "Change quota value", enter the number you decided on in the previous step (probably + "20") and click "Request" + +#### (Optional) Create templates to request other quota increases + +New accounts start with a low limit on the number of instances you can create. However, as you add accounts, and use +more instances, the numbers automatically adjust up. So you may or may not want to create a template to generate +automatic quota increase requests, depending on how many instances per account you expect to want to provision right +away. + +Create a +[Quota request template](https://docs.aws.amazon.com/servicequotas/latest/userguide/organization-templates.html) for the +organization. From the Sidebar, click "Quota request template" + +Add each EC2 quota increase request you want to make: + +1. Click "Add Quota" on the right side of the view + +2. Under "Region", select your default region (repeat with the backup region if you are using one) + +3. Under "Service", type "EC2" and select "Amazon Elastic Compute Cloud (Amazon EC2)" + +4. Under "Quota", find the quota you want to increase. The likely candidates are: + +5. type "stand" and select "Running On-Demand Standard (A, C, D, H, I, M, R, T, Z) Instances" + +6. type "stand" and select "All Standard (A, C, D, H, I, M, R, T, Z) Spot Instance Request" + +7. type "g i" and select "Running On-Demand G Instances" + +8. type "all g" and select "All G Spot Instance Requests" + +9. Under "Desired quota value" enter your desired default quota + +10. Click "Add" +After you have added all the templates, click "Enable" on the Quota request template screen to enable the templates. + +#### Enable resource sharing with AWS Organization + +[AWS Resource Access Manager (RAM)](https://docs.aws.amazon.com/ram/latest/userguide/what-is.html) lets you share your +resources with any AWS account or through AWS Organizations. + +
+ +If you have multiple AWS accounts, you can create resources centrally and use AWS RAM to share those resources with +other accounts. + +Resource sharing through AWS Organization will be used to share the Transit Gateway deployed in the `network` account +with other accounts to connect their VPCs to the shared Transit Gateway. + +This is a one-time manual step in the AWS Resource Access Manager console. When you share resources within your +organization, AWS RAM does not send invitations to principals. Principals in your organization get access to shared +resources without exchanging invitations. + +To enable resource sharing with AWS Organization via AWS Management Console + +- Open the Settings page of AWS Resource Access Manager console at + [https://console.aws.amazon.com/ram/home#Settings](https://console.aws.amazon.com/ram/home#Settings) + +- Choose "Enable sharing with AWS Organizations" + +To enable resource sharing with AWS Organization via AWS CLI + +``` + √ . [xamp-SuperAdmin] (HOST) infra ⨠ aws ram enable-sharing-with-aws-organization +{ + "returnValue": true +} +``` + +For more information, see: + +- [https://docs.aws.amazon.com/ram/latest/userguide/what-is.html](https://docs.aws.amazon.com/ram/latest/userguide/what-is.html) + +- [https://docs.aws.amazon.com/ram/latest/userguide/getting-started-sharing.html](https://docs.aws.amazon.com/ram/latest/userguide/getting-started-sharing.html) + +- [https://docs.aws.amazon.com/organizations/latest/userguide/services-that-can-integrate-ram.html](https://docs.aws.amazon.com/organizations/latest/userguide/services-that-can-integrate-ram.html) + +### Import the organization into Terraform using the `account` component + +After we are done with the above ClickOps and the Service Quota Increase for maximum number of accounts has been +granted, we can then do the rest via Terraform. + +In the Geodesic shell, as SuperAdmin, execute the following command to get the AWS Organization ID that will be used to +import the organization: + +``` +aws organizations describe-organization +``` + +From the output, identify the _organization-id_: + +``` +{ + "Organization": { + "Id": "o-7qcakq6zxw", + "Arn": "arn:aws:organizations:: + ... +``` + +Using the example above, the _organization-id_ is o-7qcakq6zxw. + +In the Geodesic shell, as SuperAdmin, execute the following command to import the AWS Organization, changing the stack +name `core-gbl-root` if needed, to reflect the stack where the organization management account is defined, and changing +the last argument to reflect the _organization-id_ from the output of the previous command. + +``` +atmos terraform import account --stack core-gbl-root 'aws_organizations_organization.this[0]' 'o-7qcakq6zxw' ``` +### Provision AWS OUs and Accounts using the `account` component + +AWS accounts and organizational units are generated dynamically by the `terraform/account` component using the +configuration in the `gbl-root` stack. + +> [!IMPORTANT] +> +> In the rare case where you will need to be enabling non-default AWS Regions, temporarily comment out the +> `DenyRootAccountAccess` service control policy setting in `gbl-root.yaml`. You will restore it later, after enabling +> the optional Regions. 
See related: +> [Decide on Opting Into Non-default Regions](https://docs.cloudposse.com/layers/network/design-decisions/decide-on-opting-into-non-default-regions/) + +> [!TIP] +> +> #### You must wait until your quota increase request has been granted +> +> If you try to create the accounts before the quota increase is granted, you can expect to see failures like +> `ACCOUNT_NUMBER_LIMIT_EXCEEDED`. + +In the Geodesic shell, execute the following commands to provision AWS Organizational Units and AWS accounts: + +``` +atmos terraform apply account --stack gbl-root +``` + +Review the Terraform plan, _**ensure that no new organization will be created**_ (look for +`aws_organizations_organization.this[0]`), type "yes" to approve and apply. This creates the AWS organizational units +and AWS accounts. + +### Configure root account credentials for each account + +Note: unless you need to enable non-default AWS regions (see next step), this step can be done later or in parallel with +other steps, for example while waiting for Terraform to create resources. + +**For** _**each**_ **new account:** + +1. Perform a password reset by attempting to [log in to the AWS console](https://signin.aws.amazon.com/signin) as a + "root user", using that account's email address, and then clicking the "Forgot password?" link. You will receive a + password reset link via email, which should be forwarded to the shared Slack channel for automated messages. Click + the link and enter a new password. (Use 1Password or [Random.org](https://www.random.org/passwords) to create a + password 26-38 characters long, including at least 3 of each class of character: lower case, uppercase, digit, and + symbol. You may need to manually combine or add to the generated password to ensure 3 symbols and digits are + present.) Save the email address and generated password as web login credentials in 1Password. While you are at it, + save the account number in a separate field. + +2. Log in using the new password, choose "My Security Credentials" from the account dropdown menu and set up + Multi-Factor Authentication (MFA) to use a Virtual MFA device. Save the MFA TOTP key in 1Password by using + 1Password's TOTP field and built-in screen scanner. Also, save the Virtual MFA ARN (sometimes shown as "serial + number"). + +3. While logged in, enable optional regions as described in the next step, if needed. + +4. (Optional, but highly recommended): [Unsubscribe](https://pages.awscloud.com/communication-preferences.html) the + account's email address from all marketing emails. + +### (Optional) Enable regions + +Most AWS regions are enabled by default. If you are using a region that is not enabled by default (such as Middle +East/Bahrain), you need to take extra steps. + +1. While logged in using root credentials (see the previous step), in the account dropdown menu, select "My Account" to + get to the [Billing home page](https://console.aws.amazon.com/billing/home?#/account). + +2. In the "AWS Regions" section, enable the regions you want to enable. + +3. Go to the IAM [account settings page](https://console.aws.amazon.com/iam/home?#/account_settings) and edit the STS + Global endpoint to create session tokens valid in all AWS regions. + +You will need to wait a few minutes for the regions to be enabled before you can proceed to the next step. Until they +are enabled, you may get what look like AWS authentication or permissions errors. 
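
If you want to verify from the command line that an opt-in region has finished enabling before proceeding, a minimal sketch follows. It assumes your AWS CLI version includes the Account Management (`aws account`) commands and uses `me-south-1` purely as an example region.

```
# Check the opt-in status of a single region
aws account get-region-opt-status --region-name me-south-1

# List regions that are enabled or still enabling
aws account list-regions --region-opt-status-contains ENABLED ENABLING
```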
+ +After enabling the regions in all accounts, re-enable the `DenyRootAccountAccess` service control policy setting in +`gbl-root.yaml` and rerun + +``` +atmos terraform apply account --stack gbl-root +``` + + ## Requirements @@ -124,7 +416,7 @@ components: | [accounts\_service\_control\_policies](#module\_accounts\_service\_control\_policies) | cloudposse/service-control-policies/aws | 0.9.2 | | [organization\_service\_control\_policies](#module\_organization\_service\_control\_policies) | cloudposse/service-control-policies/aws | 0.9.2 | | [organizational\_units\_service\_control\_policies](#module\_organizational\_units\_service\_control\_policies) | cloudposse/service-control-policies/aws | 0.9.2 | -| [service\_control\_policy\_statements\_yaml\_config](#module\_service\_control\_policy\_statements\_yaml\_config) | cloudposse/config/yaml | 1.0.1 | +| [service\_control\_policy\_statements\_yaml\_config](#module\_service\_control\_policy\_statements\_yaml\_config) | cloudposse/config/yaml | 1.0.2 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources @@ -134,6 +426,7 @@ components: | [aws_organizations_account.organization_accounts](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/organizations_account) | resource | | [aws_organizations_account.organizational_units_accounts](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/organizations_account) | resource | | [aws_organizations_organization.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/organizations_organization) | resource | +| [aws_organizations_organizational_unit.child](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/organizations_organizational_unit) | resource | | [aws_organizations_organizational_unit.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/organizations_organizational_unit) | resource | | [aws_organizations_organization.existing](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/organizations_organization) | data source | @@ -195,8 +488,11 @@ components: | [organizational\_unit\_names\_organizational\_unit\_scp\_arns](#output\_organizational\_unit\_names\_organizational\_unit\_scp\_arns) | Map of OU names to SCP ARNs | | [organizational\_unit\_names\_organizational\_unit\_scp\_ids](#output\_organizational\_unit\_names\_organizational\_unit\_scp\_ids) | Map of OU names to SCP IDs | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/account) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/account) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/account/main.tf b/modules/account/main.tf index fcad474b0..114772d0d 100644 --- a/modules/account/main.tf +++ b/modules/account/main.tf @@ -3,8 +3,10 @@ locals { organization = lookup(var.organization_config, "organization", {}) # Organizational Units list and map configuration - organizational_units = lookup(var.organization_config, "organizational_units", []) - organizational_units_map = { for ou in local.organizational_units : ou.name => ou } + organizational_units = lookup(var.organization_config, "organizational_units", []) + organizational_units_map = { for ou in local.organizational_units : ou.name => merge(ou, { + parent_ou = contains(keys(ou), "parent_ou") ? 
ou.parent_ou : "none" + }) } # Organization's Accounts list and map configuration organization_accounts = lookup(var.organization_config, "accounts", []) @@ -13,7 +15,7 @@ locals { # Organizational Units' Accounts list and map configuration organizational_units_accounts = flatten([ for ou in local.organizational_units : [ - for account in lookup(ou, "accounts", []) : merge(account, { "ou" = ou.name, "account_email_format" = lookup(ou, "account_email_format", var.account_email_format) }) + for account in lookup(ou, "accounts", []) : merge({ "ou" = ou.name, "account_email_format" = lookup(ou, "account_email_format", var.account_email_format), parent_ou = contains(keys(ou), "parent_ou") ? ou.parent_ou : "none" }, account) ] ]) organizational_units_accounts_map = { for acc in local.organizational_units_accounts : acc.name => acc } @@ -22,13 +24,22 @@ locals { all_accounts = concat(local.organization_accounts, local.organizational_units_accounts) # List of Organizational Unit names - organizational_unit_names = values(aws_organizations_organizational_unit.this)[*]["name"] + organizational_unit_names = concat( + values(aws_organizations_organizational_unit.this)[*]["name"], + values(aws_organizations_organizational_unit.child)[*]["name"] + ) # List of Organizational Unit ARNs - organizational_unit_arns = values(aws_organizations_organizational_unit.this)[*]["arn"] + organizational_unit_arns = concat( + values(aws_organizations_organizational_unit.this)[*]["arn"], + values(aws_organizations_organizational_unit.child)[*]["arn"] + ) # List of Organizational Unit IDs - organizational_unit_ids = values(aws_organizations_organizational_unit.this)[*]["id"] + organizational_unit_ids = concat( + values(aws_organizations_organizational_unit.this)[*]["id"], + values(aws_organizations_organizational_unit.child)[*]["id"] + ) # Map of account names to OU names (used for lookup `parent_id` for each account under an OU) account_names_organizational_unit_names_map = length(local.organizational_units) > 0 ? 
merge( @@ -80,7 +91,7 @@ locals { # Convert all Service Control Policy statements from YAML config to Terraform list module "service_control_policy_statements_yaml_config" { source = "cloudposse/config/yaml" - version = "1.0.1" + version = "1.0.2" list_config_local_base_path = path.module list_config_paths = var.service_control_policies_config_paths @@ -127,19 +138,26 @@ resource "aws_organizations_account" "organization_accounts" { } } -# Provision Organizational Units +# Provision Organizational Units w/o Child Orgs resource "aws_organizations_organizational_unit" "this" { - for_each = local.organizational_units_map + for_each = { for key, value in local.organizational_units_map : key => value if value.parent_ou == "none" } name = each.value.name parent_id = local.organization_root_account_id } +# Provision Child Organizational Units +resource "aws_organizations_organizational_unit" "child" { + for_each = { for key, value in local.organizational_units_map : key => value if value.parent_ou != "none" } + name = each.value.name + parent_id = aws_organizations_organizational_unit.this[each.value.parent_ou].id +} + # Provision Accounts connected to Organizational Units resource "aws_organizations_account" "organizational_units_accounts" { for_each = local.organizational_units_accounts_map name = each.value.name - parent_id = aws_organizations_organizational_unit.this[local.account_names_organizational_unit_names_map[each.value.name]].id - email = format(each.value.account_email_format, each.value.name) + parent_id = each.value.parent_ou != "none" ? aws_organizations_organizational_unit.child[each.value.ou].id : aws_organizations_organizational_unit.this[local.account_names_organizational_unit_names_map[each.value.name]].id + email = try(format(each.value.account_email_format, each.value.name), each.value.account_email_format) iam_user_access_to_billing = var.account_iam_user_access_to_billing tags = merge(module.this.tags, try(each.value.tags, {}), { Name : each.value.name }) diff --git a/modules/acm/README.md b/modules/acm/README.md index ee178632d..47891fc0b 100644 --- a/modules/acm/README.md +++ b/modules/acm/README.md @@ -1,6 +1,26 @@ +--- +tags: + - component/acm + - layer/network + - provider/aws +--- + # Component: `acm` -This component is responsible for requesting an ACM certificate for a domain and adding a CNAME record to the DNS zone to complete certificate validation. +This component is responsible for requesting an ACM certificate for a domain and adding a CNAME record to the DNS zone +to complete certificate validation. + +The ACM component is to manage an unlimited number of certificates, predominantly for vanity domains. While the +[dns-primary](https://docs.cloudposse.com/components/library/aws/dns-primary) component has the ability to generate ACM +certificates, it is very opinionated and can only manage one zone. In reality, companies have many branded domains +associated with a load balancer, so we need to be able to generate more complicated certificates. + +We have, as a convenience, the ability to create an ACM certificate as part of creating a DNS zone, whether primary or +delegated. That convenience is limited to creating `example.com` and `*.example.com` when creating a zone for +`example.com`. 
For example, Acme has delegated `acct.acme.com` and in addition to `*.acct.acme.com` needed an ACM +certificate for `*.usw2.acct.acme.com`, so we use the ACM component to provision that, rather than extend the DNS +primary or delegated components to take a list of additional certificates. Both are different views on the Single +Responsibility Principle. ## Usage @@ -46,27 +66,29 @@ components: certificate_authority_component_key: subordinate ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | +| [aws](#provider\_aws) | >= 4.9.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [acm](#module\_acm) | cloudposse/acm-request-certificate/aws | 0.16.0 | +| [acm](#module\_acm) | cloudposse/acm-request-certificate/aws | 0.16.3 | +| [dns\_delegated](#module\_dns\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [private\_ca](#module\_private\_ca) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.3 | +| [private\_ca](#module\_private\_ca) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources @@ -90,13 +112,15 @@ components: | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [dns\_delegated\_component\_name](#input\_dns\_delegated\_component\_name) | Use this component name to read from the remote state to get the dns\_delegated zone ID | `string` | `"dns-delegated"` | no | +| [dns\_delegated\_environment\_name](#input\_dns\_delegated\_environment\_name) | Use this environment name to read from the remote state to get the dns\_delegated zone ID | `string` | `"gbl"` | no | +| [dns\_delegated\_stage\_name](#input\_dns\_delegated\_stage\_name) | Use this stage name to read from the remote state to get the dns\_delegated zone ID | `string` | `null` | no | | [dns\_private\_zone\_enabled](#input\_dns\_private\_zone\_enabled) | Whether to set the zone to public or private | `bool` | `false` | no | -| [domain\_name](#input\_domain\_name) | Root domain name | `string` | n/a | yes | +| [domain\_name](#input\_domain\_name) | Root domain name | `string` | `""` | no | +| [domain\_name\_prefix](#input\_domain\_name\_prefix) | Root domain name prefix to use with DNS delegated remote state | `string` | `""` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -108,6 +132,7 @@ components: | [region](#input\_region) | AWS Region | `string` | n/a | yes | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [subject\_alternative\_names](#input\_subject\_alternative\_names) | A list of domains that should be SANs in the issued certificate | `list(string)` | `[]` | no | +| [subject\_alternative\_names\_prefixes](#input\_subject\_alternative\_names\_prefixes) | A list of domain prefixes to use with DNS delegated remote state that should be SANs in the issued certificate | `list(string)` | `[]` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | | [validation\_method](#input\_validation\_method) | Method to use for validation, DNS or EMAIL | `string` | `"DNS"` | no | @@ -118,11 +143,15 @@ components: | Name | Description | |------|-------------| | [arn](#output\_arn) | The ARN of the certificate | +| [domain\_name](#output\_domain\_name) | Certificate domain name | | [domain\_validation\_options](#output\_domain\_validation\_options) | CNAME records that are added to the DNS zone to complete certificate validation | | [id](#output\_id) | The ID of the certificate | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/acm) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/acm) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/acm/main.tf b/modules/acm/main.tf index 1ac797f07..1b1d1e67c 100644 --- a/modules/acm/main.tf +++ b/modules/acm/main.tf @@ -1,28 +1,35 @@ locals { enabled = module.this.enabled + domain_suffix = format("%s.%s", var.environment, module.dns_delegated.outputs.default_domain_name) + + domain_name = length(var.domain_name) > 0 ? var.domain_name : format("%s.%s", var.domain_name_prefix, local.domain_suffix) + + subject_alternative_names = concat(var.subject_alternative_names, formatlist("%s.${local.domain_suffix}", var.subject_alternative_names_prefixes)) + all_sans = distinct(concat([format("*.%s", local.domain_name)], local.subject_alternative_names)) + private_enabled = local.enabled && var.dns_private_zone_enabled private_ca_enabled = local.private_enabled && var.certificate_authority_enabled } data "aws_route53_zone" "default" { - count = local.enabled ? 1 : 0 - name = var.zone_name + count = local.enabled && var.process_domain_validation_options ? 1 : 0 + name = length(var.zone_name) > 0 ? var.zone_name : module.dns_delegated.outputs.default_domain_name private_zone = local.private_enabled } # https://github.com/cloudposse/terraform-aws-acm-request-certificate module "acm" { source = "cloudposse/acm-request-certificate/aws" - version = "0.16.0" + version = "0.16.3" certificate_authority_arn = local.private_ca_enabled ? module.private_ca[0].outputs.private_ca[var.certificate_authority_component_key].certificate_authority.arn : null validation_method = local.private_ca_enabled ? null : var.validation_method - domain_name = var.domain_name + domain_name = local.domain_name process_domain_validation_options = var.process_domain_validation_options ttl = 300 - subject_alternative_names = concat([format("*.%s", var.domain_name)], var.subject_alternative_names) + subject_alternative_names = local.all_sans zone_id = join("", data.aws_route53_zone.default.*.zone_id) context = module.this.context @@ -31,9 +38,9 @@ module "acm" { resource "aws_ssm_parameter" "acm_arn" { count = local.enabled ? 
1 : 0 - name = "/acm/${var.domain_name}" + name = "/acm/${local.domain_name}" value = module.acm.arn - description = format("ACM certificate ARN for '%s' domain", var.domain_name) + description = format("ACM certificate ARN for '%s' domain", local.domain_name) type = "String" overwrite = true diff --git a/modules/acm/outputs.tf b/modules/acm/outputs.tf index b6d753a08..3a6457527 100644 --- a/modules/acm/outputs.tf +++ b/modules/acm/outputs.tf @@ -12,3 +12,8 @@ output "domain_validation_options" { value = module.acm.domain_validation_options description = "CNAME records that are added to the DNS zone to complete certificate validation" } + +output "domain_name" { + value = local.enabled ? local.domain_name : null + description = "Certificate domain name" +} diff --git a/modules/acm/providers.tf b/modules/acm/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/acm/providers.tf +++ b/modules/acm/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/acm/remote-state.tf b/modules/acm/remote-state.tf index f00321dc2..937168330 100644 --- a/modules/acm/remote-state.tf +++ b/modules/acm/remote-state.tf @@ -1,6 +1,6 @@ module "private_ca" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.3" + version = "1.5.0" count = local.private_ca_enabled ? 
1 : 0 @@ -10,3 +10,14 @@ module "private_ca" { context = module.this.context } + +module "dns_delegated" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.dns_delegated_component_name + stage = var.dns_delegated_stage_name + environment = var.dns_delegated_environment_name + + context = module.this.context +} diff --git a/modules/acm/variables.tf b/modules/acm/variables.tf index 4b8212cd8..9a37ae76f 100644 --- a/modules/acm/variables.tf +++ b/modules/acm/variables.tf @@ -6,6 +6,13 @@ variable "region" { variable "domain_name" { type = string description = "Root domain name" + default = "" +} + +variable "domain_name_prefix" { + type = string + description = "Root domain name prefix to use with DNS delegated remote state" + default = "" } variable "zone_name" { @@ -35,6 +42,12 @@ variable "subject_alternative_names" { description = "A list of domains that should be SANs in the issued certificate" } +variable "subject_alternative_names_prefixes" { + type = list(string) + default = [] + description = "A list of domain prefixes to use with DNS delegated remote state that should be SANs in the issued certificate" +} + variable "dns_private_zone_enabled" { type = bool description = "Whether to set the zone to public or private" @@ -70,3 +83,21 @@ variable "certificate_authority_component_key" { default = null description = "Use this component key e.g. `root` or `mgmt` to read from the remote state to get the certificate_authority_arn if using an authority type of SUBORDINATE" } + +variable "dns_delegated_stage_name" { + type = string + default = null + description = "Use this stage name to read from the remote state to get the dns_delegated zone ID" +} + +variable "dns_delegated_environment_name" { + type = string + default = "gbl" + description = "Use this environment name to read from the remote state to get the dns_delegated zone ID" +} + +variable "dns_delegated_component_name" { + type = string + default = "dns-delegated" + description = "Use this component name to read from the remote state to get the dns_delegated zone ID" +} diff --git a/modules/acm/versions.tf b/modules/acm/versions.tf index e89eb16ed..cc73ffd35 100644 --- a/modules/acm/versions.tf +++ b/modules/acm/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.9.0" } } } diff --git a/modules/alb/README.md b/modules/alb/README.md index b0298df12..cedac8a82 100644 --- a/modules/alb/README.md +++ b/modules/alb/README.md @@ -1,6 +1,14 @@ +--- +tags: + - component/alb + - layer/ecs + - provider/aws +--- + # Component: `alb` -This component is responsible for provisioning a generic Application Load Balancer. It depends on the `vpc` and `dns-delegated` components. +This component is responsible for provisioning a generic Application Load Balancer. It depends on the `vpc` and +`dns-delegated` components. ## Usage @@ -17,13 +25,14 @@ components: health_check_path: /api/healthz ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | | [local](#requirement\_local) | >= 2.1 | ## Providers @@ -34,11 +43,12 @@ No providers. 
| Name | Source | Version | |------|--------|---------| -| [alb](#module\_alb) | cloudposse/alb/aws | 1.4.0 | +| [acm](#module\_acm) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [alb](#module\_alb) | cloudposse/alb/aws | 1.11.1 | +| [dns\_delegated](#module\_dns\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [remote\_dns](#module\_remote\_dns) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | -| [remote\_vpc](#module\_remote\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -51,6 +61,7 @@ No resources. | [access\_logs\_enabled](#input\_access\_logs\_enabled) | A boolean flag to enable/disable access\_logs | `bool` | `true` | no | | [access\_logs\_prefix](#input\_access\_logs\_prefix) | The S3 log bucket prefix | `string` | `""` | no | | [access\_logs\_s3\_bucket\_id](#input\_access\_logs\_s3\_bucket\_id) | An external S3 Bucket name to store access logs in. If specified, no logging bucket will be created. | `string` | `null` | no | +| [acm\_component\_name](#input\_acm\_component\_name) | Atmos `acm` component name | `string` | `"acm"` | no | | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | | [alb\_access\_logs\_s3\_bucket\_force\_destroy](#input\_alb\_access\_logs\_s3\_bucket\_force\_destroy) | A boolean that indicates all objects should be deleted from the ALB access logs S3 bucket so that the bucket can be destroyed without error | `bool` | `false` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | @@ -60,6 +71,9 @@ No resources. | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [deregistration\_delay](#input\_deregistration\_delay) | The amount of time to wait in seconds before changing the state of a deregistering target to unused | `number` | `15` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [dns\_acm\_enabled](#input\_dns\_acm\_enabled) | If `true`, use the ACM ARN created by the given `dns-delegated` component. Otherwise, use the ACM ARN created by the given `acm` component. | `bool` | `false` | no | +| [dns\_delegated\_component\_name](#input\_dns\_delegated\_component\_name) | Atmos `dns-delegated` component name | `string` | `"dns-delegated"` | no | +| [dns\_delegated\_environment\_name](#input\_dns\_delegated\_environment\_name) | `dns-delegated` component environment name | `string` | `null` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [health\_check\_healthy\_threshold](#input\_health\_check\_healthy\_threshold) | The number of consecutive health checks successes required before considering an unhealthy target healthy | `number` | `2` | no | @@ -79,11 +93,9 @@ No resources. | [https\_ingress\_cidr\_blocks](#input\_https\_ingress\_cidr\_blocks) | List of CIDR blocks to allow in HTTPS security group | `list(string)` |
[
"0.0.0.0/0"
]
| no | | [https\_ingress\_prefix\_list\_ids](#input\_https\_ingress\_prefix\_list\_ids) | List of prefix list IDs for allowing access to HTTPS ingress security group | `list(string)` | `[]` | no | | [https\_port](#input\_https\_port) | The port for the HTTPS listener | `number` | `443` | no | -| [https\_ssl\_policy](#input\_https\_ssl\_policy) | The name of the SSL Policy for the listener | `string` | `"ELBSecurityPolicy-TLS-1-1-2017-01"` | no | +| [https\_ssl\_policy](#input\_https\_ssl\_policy) | The name of the SSL Policy for the listener | `string` | `"ELBSecurityPolicy-TLS13-1-2-2021-06"` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | | [idle\_timeout](#input\_idle\_timeout) | The time in seconds that the connection is allowed to be idle | `number` | `60` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [internal](#input\_internal) | A boolean flag to determine whether the ALB should be internal | `bool` | `false` | no | | [ip\_address\_type](#input\_ip\_address\_type) | The type of IP addresses used by the subnets for your load balancer. The possible values are `ipv4` and `dualstack`. | `string` | `"ipv4"` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | @@ -103,6 +115,7 @@ No resources. | [target\_group\_protocol](#input\_target\_group\_protocol) | The protocol for the default target group HTTP or HTTPS | `string` | `"HTTP"` | no | | [target\_group\_target\_type](#input\_target\_group\_target\_type) | The type (`instance`, `ip` or `lambda`) of targets that can be registered with the target group | `string` | `"ip"` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [vpc\_component\_name](#input\_vpc\_component\_name) | Atmos `vpc` component name | `string` | `"vpc"` | no | ## Outputs @@ -121,10 +134,11 @@ No resources. | [listener\_arns](#output\_listener\_arns) | A list of all the listener ARNs | | [security\_group\_id](#output\_security\_group\_id) | The security group ID of the ALB | - + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/alb) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/alb) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/alb/main.tf b/modules/alb/main.tf index 81af7c303..2ccb5e15f 100644 --- a/modules/alb/main.tf +++ b/modules/alb/main.tf @@ -1,17 +1,36 @@ +locals { + dns_delegated_outputs = module.dns_delegated.outputs + dns_delegated_default_domain_name = local.dns_delegated_outputs.default_domain_name + dns_delegated_certificate = local.dns_delegated_outputs.certificate + dns_delegated_certificate_obj = lookup(local.dns_delegated_certificate, local.dns_delegated_default_domain_name, {}) + dns_delegated_certificate_arn = lookup(local.dns_delegated_certificate_obj, "arn", "") + + certificate_arn = var.dns_acm_enabled ? 
module.acm.outputs.arn : local.dns_delegated_certificate_arn +} + module "alb" { source = "cloudposse/alb/aws" - version = "1.4.0" + version = "1.11.1" - vpc_id = module.remote_vpc.outputs.vpc_id - subnet_ids = module.remote_vpc.outputs.public_subnet_ids - certificate_arn = module.remote_dns.outputs.certificate.arn + vpc_id = module.vpc.outputs.vpc_id + subnet_ids = module.vpc.outputs.public_subnet_ids + certificate_arn = local.certificate_arn internal = var.internal + http_port = var.http_port + http_ingress_cidr_blocks = var.http_ingress_cidr_blocks + http_ingress_prefix_list_ids = var.http_ingress_prefix_list_ids + https_port = var.https_port + https_ingress_cidr_blocks = var.https_ingress_cidr_blocks + https_ingress_prefix_list_ids = var.https_ingress_prefix_list_ids http_enabled = var.http_enabled https_enabled = var.https_enabled http2_enabled = var.http2_enabled http_redirect = var.http_redirect + https_ssl_policy = var.https_ssl_policy access_logs_enabled = var.access_logs_enabled + access_logs_prefix = var.access_logs_prefix + access_logs_s3_bucket_id = var.access_logs_s3_bucket_id alb_access_logs_s3_bucket_force_destroy = var.alb_access_logs_s3_bucket_force_destroy cross_zone_load_balancing_enabled = var.cross_zone_load_balancing_enabled idle_timeout = var.idle_timeout @@ -19,14 +38,18 @@ module "alb" { deletion_protection_enabled = var.deletion_protection_enabled deregistration_delay = var.deregistration_delay health_check_path = var.health_check_path + health_check_port = var.health_check_port health_check_timeout = var.health_check_timeout health_check_healthy_threshold = var.health_check_healthy_threshold health_check_unhealthy_threshold = var.health_check_unhealthy_threshold health_check_interval = var.health_check_interval health_check_matcher = var.health_check_matcher target_group_port = var.target_group_port + target_group_protocol = var.target_group_protocol + target_group_name = var.target_group_name target_group_target_type = var.target_group_target_type stickiness = var.stickiness + lifecycle_rule_enabled = var.lifecycle_rule_enabled context = module.this.context } diff --git a/modules/alb/providers.tf b/modules/alb/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/alb/providers.tf +++ b/modules/alb/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/alb/remote-state.tf b/modules/alb/remote-state.tf index 6fb1e0e68..d884a3f42 100644 --- a/modules/alb/remote-state.tf +++ b/modules/alb/remote-state.tf @@ -1,17 +1,46 @@ -module "remote_vpc" { +module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" - component = "vpc" + component = var.vpc_component_name context = module.this.context } -module "remote_dns" { +module "dns_delegated" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" - component = "dns-delegated" + component = var.dns_delegated_component_name + environment = coalesce(var.dns_delegated_environment_name, module.iam_roles.global_environment_name) + + bypass = var.dns_acm_enabled + + # Ignore errors if component doesn't exist + ignore_errors = true + + defaults = { + default_domain_name = "" + certificate = {} + } + + context = module.this.context +} + +module "acm" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.acm_component_name + + bypass = !var.dns_acm_enabled + + # Ignore errors if component doesn't exist + ignore_errors = true + + defaults = { + arn = "" + } context = module.this.context } diff --git a/modules/alb/variables.tf b/modules/alb/variables.tf index 4b4a2470e..23f244c25 100644 --- a/modules/alb/variables.tf +++ b/modules/alb/variables.tf @@ -66,7 +66,7 @@ variable "https_ingress_prefix_list_ids" { variable "https_ssl_policy" { type = string description = "The name of the SSL Policy for the listener" - default = "ELBSecurityPolicy-TLS-1-1-2017-01" + default = "ELBSecurityPolicy-TLS13-1-2-2021-06" } variable "access_logs_prefix" { @@ -210,3 +210,32 @@ variable "stickiness" { default = null } +variable "vpc_component_name" { + type = string + default = "vpc" + description = "Atmos `vpc` component name" +} + +variable "dns_delegated_component_name" { + type = string + default = "dns-delegated" + description = "Atmos `dns-delegated` component name" +} + +variable "dns_delegated_environment_name" { + type = string + default = null + description = "`dns-delegated` component environment name" +} + +variable "acm_component_name" { + type = string + default = "acm" + description = "Atmos `acm` component name" +} + +variable "dns_acm_enabled" { + type = bool + default = false + description = "If `true`, use the ACM ARN created by the given `dns-delegated` component. Otherwise, use the ACM ARN created by the given `acm` component." 
+} diff --git a/modules/alb/versions.tf b/modules/alb/versions.tf index 56a1e6c82..757d32d5a 100644 --- a/modules/alb/versions.tf +++ b/modules/alb/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } local = { source = "hashicorp/local" diff --git a/modules/amplify/README.md b/modules/amplify/README.md new file mode 100644 index 000000000..c53819b76 --- /dev/null +++ b/modules/amplify/README.md @@ -0,0 +1,239 @@ +--- +tags: + - component/amplify + - layer/unassigned + - provider/aws +--- + +# Component: `amplify` + +This component is responsible for provisioning AWS Amplify apps, backend environments, branches, domain associations, +and webhooks. + +## Usage + +**Stack Level**: Regional + +Here's an example for how to use this component: + +```yaml +# stacks/catalog/amplify/defaults.yaml +components: + terraform: + amplify/defaults: + metadata: + type: abstract + settings: + spacelift: + workspace_enabled: true + vars: + enabled: true + # https://docs.aws.amazon.com/amplify/latest/userguide/setting-up-GitHub-access.html + github_personal_access_token_secret_path: "/amplify/github_personal_access_token" + platform: "WEB" + enable_auto_branch_creation: false + enable_basic_auth: false + enable_branch_auto_build: true + enable_branch_auto_deletion: false + iam_service_role_enabled: false + environment_variables: {} + dns_delegated_component_name: "dns-delegated" + dns_delegated_environment_name: "gbl" +``` + +```yaml +# stacks/catalog/amplify/example.yaml +import: + - catalog/amplify/defaults + +components: + terraform: + amplify/example: + metadata: + # Point to the Terraform component + component: amplify + inherits: + # Inherit the default settings + - amplify/defaults + vars: + name: "example" + description: "example Amplify App" + repository: "https://github.com/cloudposse/amplify-test2" + platform: "WEB_COMPUTE" + enable_auto_branch_creation: false + enable_basic_auth: false + enable_branch_auto_build: true + enable_branch_auto_deletion: false + iam_service_role_enabled: true + # https://docs.aws.amazon.com/amplify/latest/userguide/ssr-CloudWatch-logs.html + iam_service_role_actions: + - "logs:CreateLogStream" + - "logs:CreateLogGroup" + - "logs:DescribeLogGroups" + - "logs:PutLogEvents" + custom_rules: [] + auto_branch_creation_patterns: [] + environment_variables: + NEXT_PRIVATE_STANDALONE: false + NEXT_PUBLIC_TEST: test + _LIVE_UPDATES: '[{"pkg":"node","type":"nvm","version":"16"},{"pkg":"next-version","type":"internal","version":"13.1.1"}]' + environments: + main: + branch_name: "main" + enable_auto_build: true + backend_enabled: false + enable_performance_mode: false + enable_pull_request_preview: false + framework: "Next.js - SSR" + stage: "PRODUCTION" + environment_variables: {} + develop: + branch_name: "develop" + enable_auto_build: true + backend_enabled: false + enable_performance_mode: false + enable_pull_request_preview: false + framework: "Next.js - SSR" + stage: "DEVELOPMENT" + environment_variables: {} + domain_config: + enable_auto_sub_domain: false + wait_for_verification: false + sub_domain: + - branch_name: "main" + prefix: "example-prod" + - branch_name: "develop" + prefix: "example-dev" + subdomains_dns_records_enabled: true + certificate_verification_dns_record_enabled: false +``` + +The `amplify/example` YAML configuration defines an Amplify app in AWS. 
The app is set up to use the `Next.js` framework +with SSR (server-side rendering) and is linked to the GitHub repository "https://github.com/cloudposse/amplify-test2". + +The app is set up to have two environments: `main` and `develop`. Each environment has different configuration settings, +such as the branch name, framework, and stage. The `main` environment is set up for production, while the `develop` +environments is set up for development. + +The app is also configured to have custom subdomains for each environment, with prefixes such as `example-prod` and +`example-dev`. The subdomains are configured to use DNS records, which are enabled through the +`subdomains_dns_records_enabled` variable. + +The app also has an IAM service role configured with specific IAM actions, and environment variables set up for each +environment. Additionally, the app is configured to use the Atmos Spacelift workspace, as indicated by the +`workspace_enabled: true` setting. + +The `amplify/example` Atmos component extends the `amplify/defaults` component. + +The `amplify/example` configuration is imported into the `stacks/mixins/stage/dev.yaml` stack config file to be +provisioned in the `dev` account. + +```yaml +# stacks/mixins/stage/dev.yaml +import: + - catalog/amplify/example +``` + +You can execute the following command to provision the Amplify app using Atmos: + +```shell +atmos terraform apply amplify/example -s +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.9.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [amplify\_app](#module\_amplify\_app) | cloudposse/amplify-app/aws | 0.2.1 | +| [certificate\_verification\_dns\_record](#module\_certificate\_verification\_dns\_record) | cloudposse/route53-cluster-hostname/aws | 0.12.3 | +| [dns\_delegated](#module\_dns\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [subdomains\_dns\_record](#module\_subdomains\_dns\_record) | cloudposse/route53-cluster-hostname/aws | 0.12.3 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_ssm_parameter.github_pat](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [auto\_branch\_creation\_config](#input\_auto\_branch\_creation\_config) | The automated branch creation configuration for the Amplify app |
object({
basic_auth_credentials = optional(string)
build_spec = optional(string)
enable_auto_build = optional(bool)
enable_basic_auth = optional(bool)
enable_performance_mode = optional(bool)
enable_pull_request_preview = optional(bool)
environment_variables = optional(map(string))
framework = optional(string)
pull_request_environment_name = optional(string)
stage = optional(string)
})
| `null` | no | +| [auto\_branch\_creation\_patterns](#input\_auto\_branch\_creation\_patterns) | The automated branch creation glob patterns for the Amplify app | `list(string)` | `[]` | no | +| [basic\_auth\_credentials](#input\_basic\_auth\_credentials) | The credentials for basic authorization for the Amplify app | `string` | `null` | no | +| [build\_spec](#input\_build\_spec) | The [build specification](https://docs.aws.amazon.com/amplify/latest/userguide/build-settings.html) (build spec) for the Amplify app.
If not provided then it will use the `amplify.yml` at the root of your project / branch. | `string` | `null` | no | +| [certificate\_verification\_dns\_record\_enabled](#input\_certificate\_verification\_dns\_record\_enabled) | Whether or not to create DNS records for SSL certificate validation.
If using the DNS zone from `dns-delegated`, the SSL certificate is already validated, and this variable must be set to `false`. | `bool` | `false` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [custom\_rules](#input\_custom\_rules) | The custom rules to apply to the Amplify App |
list(object({
condition = optional(string)
source = string
status = optional(string)
target = string
}))
| `[]` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [description](#input\_description) | The description for the Amplify app | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [dns\_delegated\_component\_name](#input\_dns\_delegated\_component\_name) | The component name of `dns-delegated` | `string` | `"dns-delegated"` | no | +| [dns\_delegated\_environment\_name](#input\_dns\_delegated\_environment\_name) | The environment name of `dns-delegated` | `string` | `"gbl"` | no | +| [domain\_config](#input\_domain\_config) | Amplify custom domain configuration |
object({
domain_name = optional(string)
enable_auto_sub_domain = optional(bool, false)
wait_for_verification = optional(bool, false)
sub_domain = list(object({
branch_name = string
prefix = string
}))
})
| `null` | no | +| [enable\_auto\_branch\_creation](#input\_enable\_auto\_branch\_creation) | Enables automated branch creation for the Amplify app | `bool` | `false` | no | +| [enable\_basic\_auth](#input\_enable\_basic\_auth) | Enables basic authorization for the Amplify app.
This will apply to all branches that are part of this app. | `bool` | `false` | no | +| [enable\_branch\_auto\_build](#input\_enable\_branch\_auto\_build) | Enables auto-building of branches for the Amplify App | `bool` | `true` | no | +| [enable\_branch\_auto\_deletion](#input\_enable\_branch\_auto\_deletion) | Automatically disconnects a branch in the Amplify Console when you delete a branch from your Git repository | `bool` | `false` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [environment\_variables](#input\_environment\_variables) | The environment variables for the Amplify app | `map(string)` | `{}` | no | +| [environments](#input\_environments) | The configuration of the environments for the Amplify App |
map(object({
branch_name = optional(string)
backend_enabled = optional(bool, false)
environment_name = optional(string)
deployment_artifacts = optional(string)
stack_name = optional(string)
display_name = optional(string)
description = optional(string)
enable_auto_build = optional(bool)
enable_basic_auth = optional(bool)
enable_notification = optional(bool)
enable_performance_mode = optional(bool)
enable_pull_request_preview = optional(bool)
environment_variables = optional(map(string))
framework = optional(string)
pull_request_environment_name = optional(string)
stage = optional(string)
ttl = optional(number)
webhook_enabled = optional(bool, false)
}))
| `{}` | no | +| [github\_personal\_access\_token\_secret\_path](#input\_github\_personal\_access\_token\_secret\_path) | Path to the GitHub personal access token in AWS Parameter Store | `string` | `"/amplify/github_personal_access_token"` | no | +| [iam\_service\_role\_actions](#input\_iam\_service\_role\_actions) | List of IAM policy actions for the AWS Identity and Access Management (IAM) service role for the Amplify app.
If not provided, the default set of actions will be used for the role if the variable `iam_service_role_enabled` is set to `true`. | `list(string)` | `[]` | no | +| [iam\_service\_role\_arn](#input\_iam\_service\_role\_arn) | The AWS Identity and Access Management (IAM) service role for the Amplify app.
If not provided, a new role will be created if the variable `iam_service_role_enabled` is set to `true`. | `list(string)` | `[]` | no | +| [iam\_service\_role\_enabled](#input\_iam\_service\_role\_enabled) | Flag to create the IAM service role for the Amplify app | `bool` | `false` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [oauth\_token](#input\_oauth\_token) | The OAuth token for a third-party source control system for the Amplify app.
The OAuth token is used to create a webhook and a read-only deploy key.
The OAuth token is not stored. | `string` | `null` | no | +| [platform](#input\_platform) | The platform or framework for the Amplify app | `string` | `"WEB"` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS region | `string` | n/a | yes | +| [repository](#input\_repository) | The repository for the Amplify app | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [subdomains\_dns\_records\_enabled](#input\_subdomains\_dns\_records\_enabled) | Whether or not to create DNS records for the Amplify app custom subdomains | `bool` | `false` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [arn](#output\_arn) | Amplify App ARN | +| [backend\_environments](#output\_backend\_environments) | Created backend environments | +| [branch\_names](#output\_branch\_names) | The names of the created Amplify branches | +| [default\_domain](#output\_default\_domain) | Amplify App domain (non-custom) | +| [domain\_association\_arn](#output\_domain\_association\_arn) | ARN of the domain association | +| [domain\_association\_certificate\_verification\_dns\_record](#output\_domain\_association\_certificate\_verification\_dns\_record) | The DNS record for certificate verification | +| [name](#output\_name) | Amplify App name | +| [sub\_domains](#output\_sub\_domains) | DNS records and the verified status for the subdomains | +| [webhooks](#output\_webhooks) | Created webhooks | + + + +[](https://cpco.io/component) diff --git a/modules/amplify/context.tf b/modules/amplify/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/amplify/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/amplify/dns.tf b/modules/amplify/dns.tf new file mode 100644 index 000000000..63e0d8f09 --- /dev/null +++ b/modules/amplify/dns.tf @@ -0,0 +1,41 @@ +locals { + certificate_verification_dns_record_parts = split(" ", module.amplify_app.domain_association_certificate_verification_dns_record) +} + +# Create the SSL certificate validation record +module "certificate_verification_dns_record" { + source = "cloudposse/route53-cluster-hostname/aws" + version = "0.12.3" + + count = var.certificate_verification_dns_record_enabled ? 1 : 0 + + zone_id = module.dns_delegated.outputs.default_dns_zone_id + + dns_name = trimspace(local.certificate_verification_dns_record_parts[0]) + type = trimspace(local.certificate_verification_dns_record_parts[1]) + + records = [ + trimspace(local.certificate_verification_dns_record_parts[2]) + ] + + context = module.this.context +} + +# Create DNS records for the subdomains +module "subdomains_dns_record" { + source = "cloudposse/route53-cluster-hostname/aws" + version = "0.12.3" + + count = var.subdomains_dns_records_enabled && local.domain_config != null ? length(local.domain_config.sub_domain) : 0 + + zone_id = module.dns_delegated.outputs.default_dns_zone_id + + dns_name = trimspace(split(" ", tolist(module.amplify_app.sub_domains)[count.index].dns_record)[0]) + type = trimspace(split(" ", tolist(module.amplify_app.sub_domains)[count.index].dns_record)[1]) + + records = [ + trimspace(split(" ", tolist(module.amplify_app.sub_domains)[count.index].dns_record)[2]) + ] + + context = module.this.context +} diff --git a/modules/amplify/main.tf b/modules/amplify/main.tf new file mode 100644 index 000000000..eb4672744 --- /dev/null +++ b/modules/amplify/main.tf @@ -0,0 +1,46 @@ +locals { + enabled = module.this.enabled + + domain_config = var.domain_config != null ? 
{ + domain_name = coalesce(lookup(var.domain_config, "domain_name", null), module.dns_delegated.outputs.default_domain_name) + enable_auto_sub_domain = lookup(var.domain_config, "enable_auto_sub_domain", false) + wait_for_verification = lookup(var.domain_config, "wait_for_verification", false) + sub_domain = lookup(var.domain_config, "sub_domain") + } : null +} + +# Read the GitHub PAT from SSM +data "aws_ssm_parameter" "github_pat" { + count = local.enabled ? 1 : 0 + + name = var.github_personal_access_token_secret_path + with_decryption = true +} + +module "amplify_app" { + source = "cloudposse/amplify-app/aws" + version = "0.2.1" + + description = var.description + repository = var.repository + platform = var.platform + access_token = one(data.aws_ssm_parameter.github_pat[*].value) + oauth_token = var.oauth_token + auto_branch_creation_config = var.auto_branch_creation_config + auto_branch_creation_patterns = var.auto_branch_creation_patterns + basic_auth_credentials = var.basic_auth_credentials + build_spec = var.build_spec + enable_auto_branch_creation = var.enable_auto_branch_creation + enable_basic_auth = var.enable_basic_auth + enable_branch_auto_build = var.enable_branch_auto_build + enable_branch_auto_deletion = var.enable_branch_auto_deletion + environment_variables = var.environment_variables + custom_rules = var.custom_rules + iam_service_role_enabled = var.iam_service_role_enabled + iam_service_role_arn = var.iam_service_role_arn + iam_service_role_actions = var.iam_service_role_actions + environments = var.environments + domain_config = local.domain_config + + context = module.this.context +} diff --git a/modules/amplify/outputs.tf b/modules/amplify/outputs.tf new file mode 100644 index 000000000..f38d27372 --- /dev/null +++ b/modules/amplify/outputs.tf @@ -0,0 +1,44 @@ +output "name" { + description = "Amplify App name" + value = module.amplify_app.name +} + +output "arn" { + description = "Amplify App ARN " + value = module.amplify_app.arn +} + +output "default_domain" { + description = "Amplify App domain (non-custom)" + value = module.amplify_app.default_domain +} + +output "backend_environments" { + description = "Created backend environments" + value = module.amplify_app.backend_environments +} + +output "branch_names" { + description = "The names of the created Amplify branches" + value = module.amplify_app.branch_names +} + +output "webhooks" { + description = "Created webhooks" + value = module.amplify_app.webhooks +} + +output "domain_association_arn" { + description = "ARN of the domain association" + value = module.amplify_app.domain_association_arn +} + +output "domain_association_certificate_verification_dns_record" { + description = "The DNS record for certificate verification" + value = module.amplify_app.domain_association_certificate_verification_dns_record +} + +output "sub_domains" { + description = "DNS records and the verified status for the subdomains" + value = module.amplify_app.sub_domains +} diff --git a/modules/amplify/providers.tf b/modules/amplify/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/amplify/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
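+    # With a non-null role ARN, compact() yields a single-element list and exactly one assume_role block is rendered.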
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/amplify/remote-state.tf b/modules/amplify/remote-state.tf new file mode 100644 index 000000000..55f410fb5 --- /dev/null +++ b/modules/amplify/remote-state.tf @@ -0,0 +1,9 @@ +module "dns_delegated" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.dns_delegated_component_name + environment = var.dns_delegated_environment_name + + context = module.this.context +} diff --git a/modules/amplify/variables.tf b/modules/amplify/variables.tf new file mode 100644 index 000000000..eb64cdbcb --- /dev/null +++ b/modules/amplify/variables.tf @@ -0,0 +1,216 @@ +variable "region" { + type = string + description = "AWS region" +} + +variable "github_personal_access_token_secret_path" { + description = "Path to the GitHub personal access token in AWS Parameter Store" + default = "/amplify/github_personal_access_token" + type = string +} + +variable "description" { + type = string + description = "The description for the Amplify app" + default = null +} + +variable "repository" { + type = string + description = "The repository for the Amplify app" + default = null +} + +variable "platform" { + type = string + description = "The platform or framework for the Amplify app" + default = "WEB" +} + +variable "oauth_token" { + type = string + description = <<-EOT + The OAuth token for a third-party source control system for the Amplify app. + The OAuth token is used to create a webhook and a read-only deploy key. + The OAuth token is not stored. + EOT + default = null + sensitive = true +} + +variable "auto_branch_creation_config" { + type = object({ + basic_auth_credentials = optional(string) + build_spec = optional(string) + enable_auto_build = optional(bool) + enable_basic_auth = optional(bool) + enable_performance_mode = optional(bool) + enable_pull_request_preview = optional(bool) + environment_variables = optional(map(string)) + framework = optional(string) + pull_request_environment_name = optional(string) + stage = optional(string) + }) + description = "The automated branch creation configuration for the Amplify app" + default = null +} + +variable "auto_branch_creation_patterns" { + type = list(string) + description = "The automated branch creation glob patterns for the Amplify app" + default = [] +} + +variable "basic_auth_credentials" { + type = string + description = "The credentials for basic authorization for the Amplify app" + default = null +} + +variable "build_spec" { + type = string + description = <<-EOT + The [build specification](https://docs.aws.amazon.com/amplify/latest/userguide/build-settings.html) (build spec) for the Amplify app. + If not provided then it will use the `amplify.yml` at the root of your project / branch. + EOT + default = null +} + +variable "enable_auto_branch_creation" { + type = bool + description = "Enables automated branch creation for the Amplify app" + default = false +} + +variable "enable_basic_auth" { + type = bool + description = <<-EOT + Enables basic authorization for the Amplify app. + This will apply to all branches that are part of this app. 
+ EOT + default = false +} + +variable "enable_branch_auto_build" { + type = bool + description = "Enables auto-building of branches for the Amplify App" + default = true +} + +variable "enable_branch_auto_deletion" { + type = bool + description = "Automatically disconnects a branch in the Amplify Console when you delete a branch from your Git repository" + default = false +} + +variable "environment_variables" { + type = map(string) + description = "The environment variables for the Amplify app" + default = {} +} + +variable "iam_service_role_arn" { + type = list(string) + description = <<-EOT + The AWS Identity and Access Management (IAM) service role for the Amplify app. + If not provided, a new role will be created if the variable `iam_service_role_enabled` is set to `true`. + EOT + default = [] + nullable = false +} + +variable "iam_service_role_enabled" { + type = bool + description = "Flag to create the IAM service role for the Amplify app" + default = false + nullable = false +} + +variable "iam_service_role_actions" { + type = list(string) + description = <<-EOT + List of IAM policy actions for the AWS Identity and Access Management (IAM) service role for the Amplify app. + If not provided, the default set of actions will be used for the role if the variable `iam_service_role_enabled` is set to `true`. + EOT + default = [] + nullable = false +} + +variable "custom_rules" { + type = list(object({ + condition = optional(string) + source = string + status = optional(string) + target = string + })) + description = "The custom rules to apply to the Amplify App" + default = [] + nullable = false +} + +variable "environments" { + type = map(object({ + branch_name = optional(string) + backend_enabled = optional(bool, false) + environment_name = optional(string) + deployment_artifacts = optional(string) + stack_name = optional(string) + display_name = optional(string) + description = optional(string) + enable_auto_build = optional(bool) + enable_basic_auth = optional(bool) + enable_notification = optional(bool) + enable_performance_mode = optional(bool) + enable_pull_request_preview = optional(bool) + environment_variables = optional(map(string)) + framework = optional(string) + pull_request_environment_name = optional(string) + stage = optional(string) + ttl = optional(number) + webhook_enabled = optional(bool, false) + })) + description = "The configuration of the environments for the Amplify App" + default = {} + nullable = false +} + +variable "domain_config" { + type = object({ + domain_name = optional(string) + enable_auto_sub_domain = optional(bool, false) + wait_for_verification = optional(bool, false) + sub_domain = list(object({ + branch_name = string + prefix = string + })) + }) + description = "Amplify custom domain configuration" + default = null +} + +variable "certificate_verification_dns_record_enabled" { + type = bool + description = <<-EOT + Whether or not to create DNS records for SSL certificate validation. + If using the DNS zone from `dns-delegated`, the SSL certificate is already validated, and this variable must be set to `false`. 
+ EOT + default = false +} + +variable "subdomains_dns_records_enabled" { + type = bool + description = "Whether or not to create DNS records for the Amplify app custom subdomains" + default = false +} + +variable "dns_delegated_component_name" { + type = string + description = "The component name of `dns-delegated`" + default = "dns-delegated" +} + +variable "dns_delegated_environment_name" { + type = string + description = "The environment name of `dns-delegated`" + default = "gbl" +} diff --git a/modules/amplify/versions.tf b/modules/amplify/versions.tf new file mode 100644 index 000000000..b5920b7b1 --- /dev/null +++ b/modules/amplify/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.3.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.9.0" + } + } +} diff --git a/modules/api-gateway-account-settings/README.md b/modules/api-gateway-account-settings/README.md new file mode 100644 index 000000000..8e5bd4b07 --- /dev/null +++ b/modules/api-gateway-account-settings/README.md @@ -0,0 +1,100 @@ +--- +tags: + - component/api-gateway-account-settings + - layer/unassigned + - provider/aws +--- + +# Component: `api-gateway-account-settings` + +This component is responsible for setting the global, regional settings required to allow API Gateway to write to +CloudWatch logs. + +Every AWS region you want to deploy an API Gateway to must be configured with an IAM Role that gives API Gateway +permissions to create and write to CloudWatch logs. Without this configuration, API Gateway will not be able to send +logs to CloudWatch. This configuration is done once per region regardless of the number of API Gateways deployed in that +region. This module creates an IAM role, assigns it the necessary permissions to write logs and sets it as the +"CloudWatch log role ARN" in the API Gateway configuration. + +## Usage + +**Stack Level**: Regional + +The following is a snippet for how to use this component: + +```yaml +components: + terraform: + api-gateway-account-settings: + settings: + spacelift: + workspace_enabled: true + vars: + enabled: true + tags: + Service: api-gateway +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [api\_gateway\_account\_settings](#module\_api\_gateway\_account\_settings) | cloudposse/api-gateway/aws//modules/account-settings | 0.3.1 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [role\_arn](#output\_role\_arn) | Role ARN of the API Gateway logging role | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/api-gateway-settings) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/api-gateway-account-settings/context.tf b/modules/api-gateway-account-settings/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/api-gateway-account-settings/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. 
+ Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. 
+ This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/api-gateway-account-settings/main.tf b/modules/api-gateway-account-settings/main.tf new file mode 100644 index 000000000..1a812109d --- /dev/null +++ b/modules/api-gateway-account-settings/main.tf @@ -0,0 +1,6 @@ +module "api_gateway_account_settings" { + source = "cloudposse/api-gateway/aws//modules/account-settings" + version = "0.3.1" + + context = module.this.context +} diff --git a/modules/api-gateway-account-settings/outputs.tf b/modules/api-gateway-account-settings/outputs.tf new file mode 100644 index 000000000..1b0b7a57d --- /dev/null +++ b/modules/api-gateway-account-settings/outputs.tf @@ -0,0 +1,4 @@ +output "role_arn" { + description = "Role ARN of the API Gateway logging role" + value = module.api_gateway_account_settings.role_arn +} diff --git a/modules/api-gateway-account-settings/providers.tf b/modules/api-gateway-account-settings/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/api-gateway-account-settings/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/api-gateway-account-settings/variables.tf b/modules/api-gateway-account-settings/variables.tf new file mode 100644 index 000000000..0753180bf --- /dev/null +++ b/modules/api-gateway-account-settings/variables.tf @@ -0,0 +1,4 @@ +variable "region" { + type = string + description = "AWS Region" +} diff --git a/modules/api-gateway-account-settings/versions.tf b/modules/api-gateway-account-settings/versions.tf new file mode 100644 index 000000000..f33ede77f --- /dev/null +++ b/modules/api-gateway-account-settings/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + } +} diff --git a/modules/api-gateway-rest-api/README.md b/modules/api-gateway-rest-api/README.md new file mode 100644 index 000000000..0e7e44e72 --- /dev/null +++ b/modules/api-gateway-rest-api/README.md @@ -0,0 +1,137 @@ +--- +tags: + - component/api-gateway-rest-api + - layer/addons + - provider/aws +--- + +# Component: `api-gateway-rest-api` + +This component is responsible for deploying an API Gateway REST API. 
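+
+The account-level CloudWatch Logs role that API Gateway uses for logging is managed separately by
+the `api-gateway-account-settings` component added alongside this one. A minimal stack sketch for
+enabling that component, assuming the same Atmos stack layout as the Usage example below (values
+are illustrative, not prescriptive):
+
+```yaml
+components:
+  terraform:
+    api-gateway-account-settings:
+      vars:
+        enabled: true
+```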
+ +## Usage + +**Stack Level**: Regional + +The following is a snippet for how to use this component: + +```yaml +components: + terraform: + api-gateway-rest-api: + vars: + enabled: true + name: api + openapi_config: + openapi: 3.0.1 + info: + title: Example API Gateway + version: 1.0.0 + paths: + "/": + get: + x-amazon-apigateway-integration: + httpMethod: GET + payloadFormatVersion: 1.0 + type: HTTP_PROXY + uri: https://api.ipify.org + "/{proxy+}": + get: + x-amazon-apigateway-integration: + httpMethod: GET + payloadFormatVersion: 1.0 + type: HTTP_PROXY + uri: https://api.ipify.org +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [acm](#module\_acm) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [api\_gateway\_rest\_api](#module\_api\_gateway\_rest\_api) | cloudposse/api-gateway/aws | 0.3.1 | +| [dns\_delegated](#module\_dns\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [nlb](#module\_nlb) | cloudposse/nlb/aws | 0.12.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_api_gateway_base_path_mapping.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_base_path_mapping) | resource | +| [aws_api_gateway_domain_name.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_domain_name) | resource | +| [aws_route53_record.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_record) | resource | +| [aws_acm_certificate.issued](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/acm_certificate) | data source | +| [aws_route53_zone.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/route53_zone) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [access\_log\_format](#input\_access\_log\_format) | The format of the access log file. 
| `string` | `" {\n \"requestTime\": \"$context.requestTime\",\n \"requestId\": \"$context.requestId\",\n \"httpMethod\": \"$context.httpMethod\",\n \"path\": \"$context.path\",\n \"resourcePath\": \"$context.resourcePath\",\n \"status\": $context.status,\n \"responseLatency\": $context.responseLatency,\n \"xrayTraceId\": \"$context.xrayTraceId\",\n \"integrationRequestId\": \"$context.integration.requestId\",\n \"functionResponseStatus\": \"$context.integration.status\",\n \"integrationLatency\": \"$context.integration.latency\",\n \"integrationServiceStatus\": \"$context.integration.integrationStatus\",\n \"authorizeResultStatus\": \"$context.authorize.status\",\n \"authorizerServiceStatus\": \"$context.authorizer.status\",\n \"authorizerLatency\": \"$context.authorizer.latency\",\n \"authorizerRequestId\": \"$context.authorizer.requestId\",\n \"ip\": \"$context.identity.sourceIp\",\n \"userAgent\": \"$context.identity.userAgent\",\n \"principalId\": \"$context.authorizer.principalId\",\n \"cognitoUser\": \"$context.identity.cognitoIdentityId\",\n \"user\": \"$context.identity.user\"\n}\n"` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [deregistration\_delay](#input\_deregistration\_delay) | The amount of time to wait in seconds before changing the state of a deregistering target to unused | `number` | `15` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enable\_private\_link\_nlb](#input\_enable\_private\_link\_nlb) | A flag to indicate whether to enable private link. | `bool` | `false` | no | +| [enable\_private\_link\_nlb\_deletion\_protection](#input\_enable\_private\_link\_nlb\_deletion\_protection) | A flag to indicate whether to enable private link deletion protection. | `bool` | `false` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [endpoint\_type](#input\_endpoint\_type) | The type of the endpoint. One of - PUBLIC, PRIVATE, REGIONAL | `string` | `"REGIONAL"` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [fully\_qualified\_domain\_name](#input\_fully\_qualified\_domain\_name) | The fully qualified domain name of the API. | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.<br>
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [logging\_level](#input\_logging\_level) | The logging level of the API. One of - OFF, INFO, ERROR | `string` | `"INFO"` | no | +| [metrics\_enabled](#input\_metrics\_enabled) | A flag to indicate whether to enable metrics collection. | `bool` | `true` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [openapi\_config](#input\_openapi\_config) | The OpenAPI specification for the API | `any` | `{}` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [rest\_api\_policy](#input\_rest\_api\_policy) | The IAM policy document for the API. | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [xray\_tracing\_enabled](#input\_xray\_tracing\_enabled) | A flag to indicate whether to enable X-Ray tracing. | `bool` | `true` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [arn](#output\_arn) | The ARN of the REST API | +| [created\_date](#output\_created\_date) | The date the REST API was created | +| [execution\_arn](#output\_execution\_arn) | The execution ARN part to be used in lambda\_permission's source\_arn when allowing API Gateway to invoke a Lambda
function, e.g., arn:aws:execute-api:eu-west-2:123456789012:z4675bid1j, which can be concatenated with allowed stage,
method and resource path.The ARN of the Lambda function that will be executed. | +| [id](#output\_id) | The ID of the REST API | +| [invoke\_url](#output\_invoke\_url) | The URL to invoke the REST API | +| [root\_resource\_id](#output\_root\_resource\_id) | The resource ID of the REST API's root | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/TODO) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/api-gateway-rest-api/context.tf b/modules/api-gateway-rest-api/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/api-gateway-rest-api/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. 
+ Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. 
+ This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/api-gateway-rest-api/main.tf b/modules/api-gateway-rest-api/main.tf new file mode 100644 index 000000000..b083b360e --- /dev/null +++ b/modules/api-gateway-rest-api/main.tf @@ -0,0 +1,77 @@ +locals { + enabled = module.this.enabled + + sub_domain = var.name + root_domain = coalesce(module.acm.outputs.domain_name, join(".", [ + module.this.environment, module.dns_delegated.outputs.default_domain_name + ]), module.dns_delegated.outputs.default_domain_name) + domain_name = join(".", [local.sub_domain, local.root_domain]) +} + +module "api_gateway_rest_api" { + source = "cloudposse/api-gateway/aws" + version = "0.3.1" + + enabled = local.enabled + + openapi_config = var.openapi_config + endpoint_type = var.endpoint_type + logging_level = var.logging_level + metrics_enabled = var.metrics_enabled + xray_tracing_enabled = var.xray_tracing_enabled + access_log_format = var.access_log_format + rest_api_policy = var.rest_api_policy + private_link_target_arns = module.nlb[*].nlb_arn + + context = module.this.context +} + +data "aws_acm_certificate" "issued" { + count = local.enabled ? 1 : 0 + domain = local.root_domain + statuses = ["ISSUED"] +} + +data "aws_route53_zone" "this" { + count = local.enabled ? 1 : 0 + name = module.dns_delegated.outputs.default_domain_name + private_zone = false +} + +resource "aws_api_gateway_domain_name" "this" { + count = local.enabled ? 1 : 0 + domain_name = local.domain_name + regional_certificate_arn = data.aws_acm_certificate.issued[0].arn + + endpoint_configuration { + types = ["REGIONAL"] + } + + tags = module.this.tags +} + +resource "aws_api_gateway_base_path_mapping" "this" { + count = local.enabled ? 1 : 0 + api_id = module.api_gateway_rest_api.id + domain_name = aws_api_gateway_domain_name.this[0].domain_name + stage_name = module.this.stage + + depends_on = [ + aws_api_gateway_domain_name.this, + module.api_gateway_rest_api + ] + +} + +resource "aws_route53_record" "this" { + count = local.enabled ? 1 : 0 + name = aws_api_gateway_domain_name.this[0].domain_name + type = "A" + zone_id = data.aws_route53_zone.this[0].id + + alias { + evaluate_target_health = true + name = aws_api_gateway_domain_name.this[0].regional_domain_name + zone_id = aws_api_gateway_domain_name.this[0].regional_zone_id + } +} diff --git a/modules/api-gateway-rest-api/nlb.tf b/modules/api-gateway-rest-api/nlb.tf new file mode 100644 index 000000000..403e5647e --- /dev/null +++ b/modules/api-gateway-rest-api/nlb.tf @@ -0,0 +1,30 @@ +module "nlb" { + source = "cloudposse/nlb/aws" + version = "0.12.0" + count = var.enable_private_link_nlb ? 1 : 0 + + enabled = local.enabled + + vpc_id = module.vpc.outputs.vpc.id + subnet_ids = module.vpc.outputs.private_subnet_ids + internal = true + tcp_enabled = true + cross_zone_load_balancing_enabled = true + ip_address_type = "ipv4" + deletion_protection_enabled = var.enable_private_link_nlb_deletion_protection + tcp_port = 443 + target_group_port = 443 + target_group_target_type = "alb" + health_check_protocol = "HTTPS" + nlb_access_logs_s3_bucket_force_destroy = true + deregistration_delay = var.deregistration_delay + + context = module.this.context +} + +## You can use a target attachment like below to point the nlb at an ecs alb. 
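+## The commented-out example that follows is only a sketch: it assumes an `ecs` remote-state
+## module exposing an `alb_arn` output, and registers that ALB with the NLB's default target
+## group. The attachment works because the target group above is created with
+## `target_group_target_type = "alb"` and forwards TCP 443 to port 443 on the ALB.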
+#resource "aws_lb_target_group_attachment" "alb" { +# target_group_arn = one(module.nlb[*].default_target_group_arn) +# target_id = module.ecs.outputs.alb_arn +# port = 443 +#} diff --git a/modules/api-gateway-rest-api/outputs.tf b/modules/api-gateway-rest-api/outputs.tf new file mode 100644 index 000000000..0fd29402a --- /dev/null +++ b/modules/api-gateway-rest-api/outputs.tf @@ -0,0 +1,33 @@ +output "id" { + description = "The ID of the REST API" + value = module.this.enabled ? module.api_gateway_rest_api.id : null +} + +output "root_resource_id" { + description = "The resource ID of the REST API's root" + value = module.this.enabled ? module.api_gateway_rest_api.root_resource_id : null +} + +output "created_date" { + description = "The date the REST API was created" + value = module.this.enabled ? module.api_gateway_rest_api.created_date : null +} + +output "execution_arn" { + description = < ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | -| [github](#requirement\_github) | >= 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [github](#requirement\_github) | >= 6.0 | +| [random](#requirement\_random) | >= 2.3 | | [tls](#requirement\_tls) | >= 3.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | -| [github](#provider\_github) | >= 4.0 | +| [aws](#provider\_aws) | >= 4.0 | +| [github](#provider\_github) | >= 6.0 | | [tls](#provider\_tls) | >= 3.0 | ## Modules @@ -99,7 +108,7 @@ $ terraform import -var "import_profile_name=eg-mgmt-gbl-corp-admin" -var-file=" | Name | Source | Version | |------|--------|---------| | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [store\_write](#module\_store\_write) | cloudposse/ssm-parameter-store/aws | 0.10.0 | +| [store\_write](#module\_store\_write) | cloudposse/ssm-parameter-store/aws | 0.11.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources @@ -118,6 +127,7 @@ $ terraform import -var "import_profile_name=eg-mgmt-gbl-corp-admin" -var-file=" | [github_team_repository.default](https://registry.terraform.io/providers/integrations/github/latest/docs/resources/team_repository) | resource | | [tls_private_key.default](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource | | [aws_ssm_parameter.github_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [github_repository.default](https://registry.terraform.io/providers/integrations/github/latest/docs/data-sources/repository) | data source | | [github_team.default](https://registry.terraform.io/providers/integrations/github/latest/docs/data-sources/team) | data source | | [github_user.automation_user](https://registry.terraform.io/providers/integrations/github/latest/docs/data-sources/user) | data source | @@ -128,32 +138,36 @@ $ terraform import -var "import_profile_name=eg-mgmt-gbl-corp-admin" -var-file=" | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [create\_repo](#input\_create\_repo) | Whether or not to create the repository or use an existing one | `bool` | `true` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [description](#input\_description) | The description of the repository | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [environments](#input\_environments) | Environments to populate `applicationset.yaml` files and repository deploy keys (for ArgoCD) for.

`auto-sync` determines whether or not the ArgoCD application will be automatically synced. |
list(object({
tenant = string
environment = string
stage = string
auto-sync = bool
}))
| `[]` | no | +| [environments](#input\_environments) | Environments to populate `applicationset.yaml` files and repository deploy keys (for ArgoCD) for.

`auto-sync` determines whether or not the ArgoCD application will be automatically synced.

`ignore-differences` determines whether or not the ArgoCD application will ignore the number of
replicas in the deployment. Read more on ignore differences here:
https://argo-cd.readthedocs.io/en/stable/user-guide/sync-options/#respect-ignore-difference-configs

Example:
tenant: plat
environment: use1
stage: sandbox
auto-sync: true
ignore-differences:
- group: apps
kind: Deployment
json-pointers:
- /spec/replicas
|
list(object({
tenant = optional(string, null)
environment = string
stage = string
attributes = optional(list(string), [])
auto-sync = bool
ignore-differences = optional(list(object({
group = string,
kind = string,
json-pointers = list(string)
})), [])
}))
| `[]` | no | | [github\_base\_url](#input\_github\_base\_url) | This is the target GitHub base API endpoint. Providing a value is a requirement when working with GitHub Enterprise. It is optional to provide this value and it can also be sourced from the `GITHUB_BASE_URL` environment variable. The value must end with a slash, for example: `https://terraformtesting-ghe.westus.cloudapp.azure.com/` | `string` | `null` | no | | [github\_codeowner\_teams](#input\_github\_codeowner\_teams) | List of teams to use when populating the CODEOWNERS file.

For example: `["@ACME/cloud-admins", "@ACME/cloud-developers"]`. | `list(string)` | n/a | yes | +| [github\_default\_notifications\_enabled](#input\_github\_default\_notifications\_enabled) | Enable default GitHub commit statuses notifications (required for CD sync mode) | `string` | `true` | no | +| [github\_notifications](#input\_github\_notifications) | ArgoCD notification annotations for subscribing to GitHub.

The default value given uses the same notification template names as defined in the `eks/argocd` component. If you want to add notifications, also include any notifications from this default list that you want to keep. | `list(string)` |<br>
[
"notifications.argoproj.io/subscribe.on-deploy-started.app-repo-github-commit-status: \"\"",
"notifications.argoproj.io/subscribe.on-deploy-started.argocd-repo-github-commit-status: \"\"",
"notifications.argoproj.io/subscribe.on-deploy-succeded.app-repo-github-commit-status: \"\"",
"notifications.argoproj.io/subscribe.on-deploy-succeded.argocd-repo-github-commit-status: \"\"",
"notifications.argoproj.io/subscribe.on-deploy-failed.app-repo-github-commit-status: \"\"",
"notifications.argoproj.io/subscribe.on-deploy-failed.argocd-repo-github-commit-status: \"\""
]
| no | | [github\_organization](#input\_github\_organization) | GitHub Organization | `string` | n/a | yes | | [github\_token\_override](#input\_github\_token\_override) | Use the value of this variable as the GitHub token instead of reading it from SSM | `string` | `null` | no | | [github\_user](#input\_github\_user) | Github user | `string` | n/a | yes | | [github\_user\_email](#input\_github\_user\_email) | Github user email | `string` | n/a | yes | | [gitignore\_entries](#input\_gitignore\_entries) | List of .gitignore entries to use when populating the .gitignore file.

For example: `[".idea/", ".vscode/"]`. | `list(string)` | n/a | yes | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.<br>
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [manifest\_kubernetes\_namespace](#input\_manifest\_kubernetes\_namespace) | The namespace used for the ArgoCD application | `string` | `"argocd"` | no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | | [permissions](#input\_permissions) | A list of Repository Permission objects used to configure the team permissions of the repository

`team_slug` should be the name of the team without the `@{org}` e.g. `@cloudposse/team` => `team`
`permission` is just one of the available values listed below |
list(object({
team_slug = string,
permission = string
}))
| `[]` | no | +| [push\_restrictions\_enabled](#input\_push\_restrictions\_enabled) | Enforce who can push to the main branch | `bool` | `true` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | -| [slack\_channel](#input\_slack\_channel) | The name of the slack channel to configure ArgoCD notifications for | `string` | `null` | no | +| [required\_pull\_request\_reviews](#input\_required\_pull\_request\_reviews) | Enforce restrictions for pull request reviews | `bool` | `true` | no | +| [slack\_notifications\_channel](#input\_slack\_notifications\_channel) | If given, the Slack channel to for deployment notifications. | `string` | `""` | no | | [ssm\_github\_api\_key](#input\_ssm\_github\_api\_key) | SSM path to the GitHub API key | `string` | `"/argocd/github/api_key"` | no | | [ssm\_github\_deploy\_key\_format](#input\_ssm\_github\_deploy\_key\_format) | Format string of the SSM parameter path to which the deploy keys will be written to (%s will be replaced with the environment name) | `string` | `"/argocd/deploy_keys/%s"` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | @@ -166,16 +180,18 @@ $ terraform import -var "import_profile_name=eg-mgmt-gbl-corp-admin" -var-file=" |------|-------------| | [deploy\_keys\_ssm\_path\_format](#output\_deploy\_keys\_ssm\_path\_format) | SSM Parameter Store path format for the repository's deploy keys | | [deploy\_keys\_ssm\_paths](#output\_deploy\_keys\_ssm\_paths) | SSM Parameter Store paths for the repository's deploy keys | +| [repository](#output\_repository) | Repository name | | [repository\_default\_branch](#output\_repository\_default\_branch) | Repository default branch | | [repository\_description](#output\_repository\_description) | Repository description | | [repository\_git\_clone\_url](#output\_repository\_git\_clone\_url) | Repository git clone URL | | [repository\_ssh\_clone\_url](#output\_repository\_ssh\_clone\_url) | Repository SSH clone URL | | [repository\_url](#output\_repository\_url) | Repository URL | - + ## References - * [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/argocd-repo) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/argocd-repo) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/argocd-repo/applicationset.tf b/modules/argocd-repo/applicationset.tf index 39c1f5990..21abbf92b 100644 --- a/modules/argocd-repo/applicationset.tf +++ b/modules/argocd-repo/applicationset.tf @@ -1,16 +1,23 @@ +locals { + github_default_notifications_enabled = local.enabled && var.github_default_notifications_enabled + github_notifications = local.github_default_notifications_enabled ? var.github_notifications : [] +} + resource "github_repository_file" "application_set" { for_each = local.environments - repository = join("", github_repository.default.*.name) - branch = join("", github_repository.default.*.default_branch) - file = "${each.value.tenant != null ? format("%s/", each.value.tenant) : ""}${each.value.environment}-${each.value.stage}/${local.manifest_kubernetes_namespace}/applicationset.yaml" + repository = local.github_repository.name + branch = local.github_repository.default_branch + file = "${each.value.tenant != null ? 
format("%s/", each.value.tenant) : ""}${each.value.environment}-${each.value.stage}${length(each.value.attributes) > 0 ? format("-%s", join("-", each.value.attributes)) : ""}/${local.manifest_kubernetes_namespace}/applicationset.yaml" content = templatefile("${path.module}/templates/applicationset.yaml.tpl", { - environment = each.key - auto-sync = each.value.auto-sync - name = module.this.namespace - namespace = local.manifest_kubernetes_namespace - ssh_url = join("", github_repository.default.*.ssh_clone_url) - slack_channel = var.slack_channel + environment = each.key + auto-sync = each.value.auto-sync + ignore-differences = each.value.ignore-differences + name = module.this.namespace + namespace = local.manifest_kubernetes_namespace + ssh_url = local.github_repository.ssh_clone_url + notifications = local.github_notifications + slack_notifications_channel = var.slack_notifications_channel }) commit_message = "Initialize environment: `${each.key}`." commit_author = var.github_user diff --git a/modules/argocd-repo/default.auto.tfvars b/modules/argocd-repo/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/argocd-repo/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/argocd-repo/git-files.tf b/modules/argocd-repo/git-files.tf index 977ced701..ddb4dc95d 100644 --- a/modules/argocd-repo/git-files.tf +++ b/modules/argocd-repo/git-files.tf @@ -1,8 +1,8 @@ resource "github_repository_file" "gitignore" { count = local.enabled ? 1 : 0 - repository = join("", github_repository.default.*.name) - branch = join("", github_repository.default.*.default_branch) + repository = local.github_repository.name + branch = local.github_repository.default_branch file = ".gitignore" content = templatefile("${path.module}/templates/.gitignore.tpl", { entries = var.gitignore_entries @@ -16,12 +16,12 @@ resource "github_repository_file" "gitignore" { resource "github_repository_file" "readme" { count = local.enabled ? 1 : 0 - repository = join("", github_repository.default.*.name) - branch = join("", github_repository.default.*.default_branch) + repository = local.github_repository.name + branch = local.github_repository.default_branch file = "README.md" content = templatefile("${path.module}/templates/README.md.tpl", { - repository_name = join("", github_repository.default.*.name) - repository_description = join("", github_repository.default.*.description) + repository_name = local.github_repository.name + repository_description = local.github_repository.description github_organization = var.github_organization }) commit_message = "Create README.md file." @@ -33,8 +33,8 @@ resource "github_repository_file" "readme" { resource "github_repository_file" "codeowners_file" { count = local.enabled ? 1 : 0 - repository = join("", github_repository.default.*.name) - branch = join("", github_repository.default.*.default_branch) + repository = local.github_repository.name + branch = local.github_repository.default_branch file = ".github/CODEOWNERS" content = templatefile("${path.module}/templates/CODEOWNERS.tpl", { codeowners = var.github_codeowner_teams @@ -48,8 +48,8 @@ resource "github_repository_file" "codeowners_file" { resource "github_repository_file" "pull_request_template" { count = local.enabled ? 
1 : 0 - repository = join("", github_repository.default.*.name) - branch = join("", github_repository.default.*.default_branch) + repository = local.github_repository.name + branch = local.github_repository.default_branch file = ".github/PULL_REQUEST_TEMPLATE.md" content = file("${path.module}/templates/PULL_REQUEST_TEMPLATE.md") commit_message = "Create PULL_REQUEST_TEMPLATE.md file." diff --git a/modules/argocd-repo/main.tf b/modules/argocd-repo/main.tf index ff61154a6..43a7109d3 100644 --- a/modules/argocd-repo/main.tf +++ b/modules/argocd-repo/main.tf @@ -1,15 +1,18 @@ locals { enabled = module.this.enabled + environments = local.enabled ? { for env in var.environments : (format( - "${env.tenant != null ? "%[1]s/" : ""}%[2]s-%[3]s", + "${env.tenant != null ? "%[1]s/" : ""}%[2]s-%[3]s${length(env.attributes) > 0 ? "-%[4]s" : "%[4]s"}", env.tenant, env.environment, env.stage, + join("-", env.attributes) )) => env } : {} - manifest_kubernetes_namespace = "argocd" + + manifest_kubernetes_namespace = var.manifest_kubernetes_namespace team_slugs = toset(compact([ for permission in var.permissions : lookup(permission, "team_slug", null) @@ -25,23 +28,36 @@ locals { permission = var.permissions[index].permission } } + + empty_repo = { + name = "" + default_branch = "" + } + + github_repository = try((var.create_repo ? github_repository.default : data.github_repository.default)[0], local.empty_repo) +} + +data "github_repository" "default" { + count = local.enabled && !var.create_repo ? 1 : 0 + name = var.name } resource "github_repository" "default" { - count = local.enabled ? 1 : 0 + count = local.enabled && var.create_repo ? 1 : 0 name = module.this.name description = var.description auto_init = true # will create a 'main' branch - visibility = "private" + visibility = "private" + vulnerability_alerts = var.vulnerability_alerts_enabled } resource "github_branch_default" "default" { count = local.enabled ? 1 : 0 - repository = join("", github_repository.default.*.name) - branch = join("", github_repository.default.*.default_branch) + repository = local.github_repository.name + branch = local.github_repository.default_branch } data "github_user" "automation_user" { @@ -55,21 +71,32 @@ resource "github_branch_protection" "default" { # the main branch. Those commits made by the automation user, which is an admin. count = local.enabled ? 1 : 0 - repository_id = join("", github_repository.default.*.name) + repository_id = local.github_repository.name - pattern = join("", github_branch_default.default.*.branch) + pattern = join("", github_branch_default.default[*].branch) enforce_admins = false # needs to be false in order to allow automation user to push allows_deletions = true - required_pull_request_reviews { - dismiss_stale_reviews = true - restrict_dismissals = true - require_code_owner_reviews = true + dynamic "required_pull_request_reviews" { + for_each = var.required_pull_request_reviews ? [0] : [] + content { + dismiss_stale_reviews = true + restrict_dismissals = true + require_code_owner_reviews = true + } } - push_restrictions = [ - join("", data.github_user.automation_user.*.node_id), - ] + restrict_pushes { + push_allowances = var.push_restrictions_enabled ? 
[ + join("", data.github_user.automation_user[*].node_id), + ] : [] + } + + lifecycle { + ignore_changes = [ + restrict_pushes[0].push_allowances + ] + } } data "github_team" "default" { @@ -81,7 +108,7 @@ data "github_team" "default" { resource "github_team_repository" "default" { for_each = local.team_permissions - repository = join("", github_repository.default[*].name) + repository = local.github_repository.name team_id = each.value.id permission = each.value.permission } @@ -96,8 +123,8 @@ resource "tls_private_key" "default" { resource "github_repository_deploy_key" "default" { for_each = local.environments - title = "Deploy key for ArgoCD environment: ${each.key} (${join("", github_repository.default.*.default_branch)} branch)" - repository = join("", github_repository.default.*.name) + title = "Deploy key for ArgoCD environment: ${each.key} (${local.github_repository.default_branch} branch)" + repository = local.github_repository.name key = tls_private_key.default[each.key].public_key_openssh read_only = true } diff --git a/modules/argocd-repo/outputs.tf b/modules/argocd-repo/outputs.tf index b2bb304d1..19430b352 100644 --- a/modules/argocd-repo/outputs.tf +++ b/modules/argocd-repo/outputs.tf @@ -8,27 +8,32 @@ output "deploy_keys_ssm_path_format" { value = local.enabled ? var.ssm_github_deploy_key_format : null } +output "repository" { + description = "Repository name" + value = local.enabled && var.create_repo ? module.this.name : var.name +} + output "repository_description" { description = "Repository description" - value = join("", github_repository.default.*.description) + value = local.github_repository.description } output "repository_default_branch" { description = "Repository default branch" - value = join("", github_repository.default.*.default_branch) + value = local.github_repository.default_branch } output "repository_url" { description = "Repository URL" - value = join("", github_repository.default.*.html_url) + value = local.github_repository.html_url } output "repository_git_clone_url" { description = "Repository git clone URL" - value = join("", github_repository.default.*.git_clone_url) + value = local.github_repository.git_clone_url } output "repository_ssh_clone_url" { description = "Repository SSH clone URL" - value = join("", github_repository.default.*.ssh_clone_url) + value = local.github_repository.ssh_clone_url } diff --git a/modules/argocd-repo/ssm.tf b/modules/argocd-repo/provider-github.tf similarity index 83% rename from modules/argocd-repo/ssm.tf rename to modules/argocd-repo/provider-github.tf index d85e85cf2..60ed4e0b4 100644 --- a/modules/argocd-repo/ssm.tf +++ b/modules/argocd-repo/provider-github.tf @@ -10,7 +10,7 @@ data "aws_ssm_parameter" "github_api_key" { module "store_write" { source = "cloudposse/ssm-parameter-store/aws" - version = "0.10.0" + version = "0.11.0" parameter_write = [for k, v in local.environments : { @@ -24,3 +24,9 @@ module "store_write" { context = module.this.context } + +provider "github" { + base_url = var.github_base_url + owner = var.github_organization + token = local.github_token +} diff --git a/modules/argocd-repo/providers.tf b/modules/argocd-repo/providers.tf index 9ee194cea..54257fd20 100644 --- a/modules/argocd-repo/providers.tf +++ b/modules/argocd-repo/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. 
When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = module.iam_roles.terraform_role_arn } } } @@ -15,21 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} - -provider "github" { - base_url = var.github_base_url - owner = var.github_organization - token = local.github_token -} diff --git a/modules/argocd-repo/templates/.gitignore.tpl b/modules/argocd-repo/templates/.gitignore.tpl index 20c7c32c7..d1086c259 100644 --- a/modules/argocd-repo/templates/.gitignore.tpl +++ b/modules/argocd-repo/templates/.gitignore.tpl @@ -3,4 +3,4 @@ %{ for entry in entries ~} ${entry} -%{ endfor ~} \ No newline at end of file +%{ endfor ~} diff --git a/modules/argocd-repo/templates/README.md.tpl b/modules/argocd-repo/templates/README.md.tpl index 36615a3b5..8593e2d0f 100644 --- a/modules/argocd-repo/templates/README.md.tpl +++ b/modules/argocd-repo/templates/README.md.tpl @@ -11,4 +11,4 @@ them to an `apps/[app name]/` subdirectory in each environment's directory. The `applicationset.yaml` file in each environment directory's `argocd/` subdirectory is referenced by ArgoCD deployment in each environment's dedicated EKS cluster. This ApplicationSet manifest makes use of [Git Generators](https://argocd-applicationset.readthedocs.io/en/stable/Generators-Git/) -in order to dynamically create ArgoCD Application objects based on the manifests in the `apps/[app name]/` directory. \ No newline at end of file +in order to dynamically create ArgoCD Application objects based on the manifests in the `apps/[app name]/` directory. 
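
As a point of reference for the template change below: with the example `ignore-differences` input documented in `variables.tf` (group `apps`, kind `Deployment`, json-pointer `/spec/replicas`), each generated Application's `spec` would contain roughly the following (a sketch; exact nesting and indentation are determined by the template):

```yaml
spec:
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
      - RespectIgnoreDifferences=true
  ignoreDifferences:
    - group: "apps"
      kind: "Deployment"
      jsonPointers:
        - /spec/replicas
```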
diff --git a/modules/argocd-repo/templates/applicationset.yaml.tpl b/modules/argocd-repo/templates/applicationset.yaml.tpl index 11f695dea..e44b750ff 100644 --- a/modules/argocd-repo/templates/applicationset.yaml.tpl +++ b/modules/argocd-repo/templates/applicationset.yaml.tpl @@ -8,23 +8,6 @@ metadata: argocd-autopilot.argoproj-labs.io/default-dest-server: https://kubernetes.default.svc argocd.argoproj.io/sync-options: PruneLast=true argocd.argoproj.io/sync-wave: "-2" - notifications.argoproj.io/subscribe.on-deployed.slack: ${slack_channel} - notifications.argoproj.io/subscribe.on-health-degraded.slack: ${slack_channel} - notifications.argoproj.io/subscribe.on-sync-failed.slack: ${slack_channel} - notifications.argoproj.io/subscribe.on-sync-running.slack: ${slack_channel} - notifications.argoproj.io/subscribe.on-sync-status-unknown.slack: ${slack_channel} - notifications.argoproj.io/subscribe.on-sync-succeeded.slack: ${slack_channel} - notifications.argoproj.io/subscribe.on-deployed.datadog: "" - notifications.argoproj.io/subscribe.on-health-degraded.datadog: "" - notifications.argoproj.io/subscribe.on-sync-failed.datadog: "" - notifications.argoproj.io/subscribe.on-sync-running.datadog: "" - notifications.argoproj.io/subscribe.on-sync-status-unknown.datadog: "" - notifications.argoproj.io/subscribe.on-sync-succeeded.datadog: "" - notifications.argoproj.io/subscribe.on-deleted.slack: ${slack_channel} - notifications.argoproj.io/subscribe.on-deployed.github-deployment: "" - notifications.argoproj.io/subscribe.on-deployed.github-commit-status: "" - notifications.argoproj.io/subscribe.on-deleted.github-deployment: "" - creationTimestamp: null name: ${name} namespace: ${namespace} spec: @@ -65,6 +48,17 @@ spec: app_repository: '{{app_repository}}' app_commit: '{{app_commit}}' app_hostname: 'https://{{app_hostname}}' +%{for noti in notifications ~} + ${noti} +%{ endfor ~} +%{if length(slack_notifications_channel) > 0 ~} + notifications.argoproj.io/subscribe.on-created.slack: ${slack_notifications_channel} + notifications.argoproj.io/subscribe.on-deleted.slack: ${slack_notifications_channel} + notifications.argoproj.io/subscribe.on-success.slack: ${slack_notifications_channel} + notifications.argoproj.io/subscribe.on-health-degraded.slack: ${slack_notifications_channel} + notifications.argoproj.io/subscribe.on-failure.slack: ${slack_notifications_channel} + notifications.argoproj.io/subscribe.on-started.slack: ${slack_notifications_channel} +%{ endif ~} name: '{{name}}' spec: project: ${name} @@ -83,3 +77,15 @@ spec: %{ endif ~} syncOptions: - CreateNamespace=true +%{if length(ignore-differences) > 0 ~} + - RespectIgnoreDifferences=true + ignoreDifferences: +%{for item in ignore-differences ~} + - group: "${item.group}" + kind: "${item.kind}" + jsonPointers: +%{for pointer in item.json-pointers ~} + - ${pointer} +%{ endfor ~} +%{ endfor ~} +%{ endif ~} diff --git a/modules/argocd-repo/variables.tf b/modules/argocd-repo/variables.tf index 739c6ef13..0f4716517 100644 --- a/modules/argocd-repo/variables.tf +++ b/modules/argocd-repo/variables.tf @@ -11,15 +11,38 @@ variable "description" { variable "environments" { type = list(object({ - tenant = string + tenant = optional(string, null) environment = string stage = string + attributes = optional(list(string), []) auto-sync = bool + ignore-differences = optional(list(object({ + group = string, + kind = string, + json-pointers = list(string) + })), []) })) description = <<-EOT Environments to populate `applicationset.yaml` files and repository deploy 
keys (for ArgoCD) for. `auto-sync` determines whether or not the ArgoCD application will be automatically synced. + + `ignore-differences` determines whether or not the ArgoCD application will ignore the number of + replicas in the deployment. Read more on ignore differences here: + https://argo-cd.readthedocs.io/en/stable/user-guide/sync-options/#respect-ignore-difference-configs + + Example: + ``` + tenant: plat + environment: use1 + stage: sandbox + auto-sync: true + ignore-differences: + - group: apps + kind: Deployment + json-pointers: + - /spec/replicas + ``` EOT default = [] } @@ -104,8 +127,61 @@ variable "permissions" { } } -variable "slack_channel" { +variable "github_default_notifications_enabled" { type = string - description = "The name of the slack channel to configure ArgoCD notifications for" - default = null + description = "Enable default GitHub commit statuses notifications (required for CD sync mode)" + default = true +} + +variable "create_repo" { + type = bool + description = "Whether or not to create the repository or use an existing one" + default = true +} + +variable "required_pull_request_reviews" { + type = bool + description = "Enforce restrictions for pull request reviews" + default = true +} + +variable "push_restrictions_enabled" { + type = bool + description = "Enforce who can push to the main branch" + default = true +} + +variable "vulnerability_alerts_enabled" { + type = bool + description = "Enable security alerts for vulnerable dependencies" + default = false +} + +variable "slack_notifications_channel" { + type = string + default = "" + description = "If given, the Slack channel to for deployment notifications." +} + +variable "manifest_kubernetes_namespace" { + type = string + default = "argocd" + description = "The namespace used for the ArgoCD application" +} + +variable "github_notifications" { + type = list(string) + default = [ + "notifications.argoproj.io/subscribe.on-deploy-started.app-repo-github-commit-status: \"\"", + "notifications.argoproj.io/subscribe.on-deploy-started.argocd-repo-github-commit-status: \"\"", + "notifications.argoproj.io/subscribe.on-deploy-succeded.app-repo-github-commit-status: \"\"", + "notifications.argoproj.io/subscribe.on-deploy-succeded.argocd-repo-github-commit-status: \"\"", + "notifications.argoproj.io/subscribe.on-deploy-failed.app-repo-github-commit-status: \"\"", + "notifications.argoproj.io/subscribe.on-deploy-failed.argocd-repo-github-commit-status: \"\"", + ] + description = < ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | ## Providers -No providers. +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [athena](#module\_athena) | cloudposse/athena/aws | 0.1.0 | +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [athena](#module\_athena) | cloudposse/athena/aws | 0.1.1 | +| [cloudtrail\_bucket](#module\_cloudtrail\_bucket) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources -No resources. 
+| Name | Type | +|------|------| +| [aws_athena_named_query.cloudtrail_query_alter_tables](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/athena_named_query) | resource | +| [aws_athena_named_query.cloudtrail_query_create_tables](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/athena_named_query) | resource | ## Inputs @@ -91,6 +159,8 @@ No resources. | [athena\_s3\_bucket\_id](#input\_athena\_s3\_bucket\_id) | Use an existing S3 bucket for Athena query results if `create_s3_bucket` is `false`. | `string` | `null` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | | [bytes\_scanned\_cutoff\_per\_query](#input\_bytes\_scanned\_cutoff\_per\_query) | Integer for the upper data usage limit (cutoff) for the amount of bytes a single query in a workgroup is allowed to scan. Must be at least 10485760. | `number` | `null` | no | +| [cloudtrail\_bucket\_component\_name](#input\_cloudtrail\_bucket\_component\_name) | The name of the CloudTrail bucket component | `string` | `"cloudtrail-bucket"` | no | +| [cloudtrail\_database](#input\_cloudtrail\_database) | The name of the Athena Database to use for CloudTrail logs. If set, an Athena table will be created for the CloudTrail trail. | `string` | `""` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [create\_kms\_key](#input\_create\_kms\_key) | Enable the creation of a KMS key used by Athena workgroup. | `bool` | `true` | no | | [create\_s3\_bucket](#input\_create\_s3\_bucket) | Enable the creation of an S3 bucket to use for Athena query results | `bool` | `true` | no | @@ -102,8 +172,6 @@ No resources. | [enforce\_workgroup\_configuration](#input\_enforce\_workgroup\_configuration) | Boolean whether the settings for the workgroup override client-side settings. | `bool` | `true` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -134,8 +202,12 @@ No resources. | [s3\_bucket\_id](#output\_s3\_bucket\_id) | ID of S3 bucket used for Athena query results. | | [workgroup\_id](#output\_workgroup\_id) | ID of newly created Athena workgroup. | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/athena) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/athena) - + Cloud Posse's upstream component +- [Querying AWS CloudTrail logs with AWS Athena](https://docs.aws.amazon.com/athena/latest/ug/cloudtrail-logs.html) [](https://cpco.io/component) diff --git a/modules/athena/cloudtrail.tf b/modules/athena/cloudtrail.tf new file mode 100644 index 000000000..2740ca9d2 --- /dev/null +++ b/modules/athena/cloudtrail.tf @@ -0,0 +1,111 @@ + +# This file creates a table for Athena to query centralized Cloudtrail logs in S3. +# https://docs.aws.amazon.com/athena/latest/ug/cloudtrail-logs.html#create-cloudtrail-table-ct + +locals { + cloudtrail_enabled = module.this.enabled && length(var.cloudtrail_database) > 0 + cloudtrail_table_name = "%s_cloudtrail_logs" + + # s3://cloudtrail_bucket_name/AWSLogs/organization_id/Account_ID/CloudTrail/ + organization_id = module.account_map.outputs.org.id + cloudtrail_s3_bucket_id = module.cloudtrail_bucket[0].outputs.cloudtrail_bucket_id + cloudtrail_s3_location = "s3://${local.cloudtrail_s3_bucket_id}/AWSLogs/${local.organization_id}/%s/CloudTrail/" + + cloudtrail_query_create_table = <, + sessionissuer:STRUCT< + type:STRING, + principalId:STRING, + arn:STRING, + accountId:STRING, + userName:STRING>, + ec2RoleDelivery:string, + webIdFederationData:map + > +>, +eventtime STRING, +eventsource STRING, +eventname STRING, +awsregion STRING, +sourceipaddress STRING, +useragent STRING, +errorcode STRING, +errormessage STRING, +requestparameters STRING, +responseelements STRING, +additionaleventdata STRING, +requestid STRING, +eventid STRING, +resources ARRAY>, +eventtype STRING, +apiversion STRING, +readonly STRING, +recipientaccountid STRING, +serviceeventdetails STRING, +sharedeventid STRING, +vpcendpointid STRING, +tlsDetails struct< + tlsVersion:string, + cipherSuite:string, + clientProvidedHostHeader:string> +) +PARTITIONED BY (account string, region string, year string, month string, day string) +ROW FORMAT SERDE 'org.apache.hive.hcatalog.data.JsonSerDe' +STORED AS INPUTFORMAT 'com.amazon.emr.cloudtrail.CloudTrailInputFormat' +OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' +LOCATION '${local.cloudtrail_s3_location}' +EOT + + + account_name = lookup(module.this.descriptors, "account_name", module.this.stage) + account_id = module.account_map.outputs.full_account_map[local.account_name] + timestamp = timestamp() + current_year = formatdate("YYYY", local.timestamp) + current_month = formatdate("MM", local.timestamp) + current_day = formatdate("DD", local.timestamp) + + cloudtrail_query_alter_table = < ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | -| [mysql](#requirement\_mysql) | >= 1.9 | +| [aws](#requirement\_aws) | >= 4.0 | +| [mysql](#requirement\_mysql) | >= 3.0.22 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | -| [mysql](#provider\_mysql) | >= 1.9 | +| [aws](#provider\_aws) | >= 4.0 | +| 
[mysql](#provider\_mysql) | >= 3.0.22 | ## Modules @@ -71,7 +84,7 @@ components: |------|--------|---------| | [additional\_grants](#module\_additional\_grants) | ./modules/mysql-user | n/a | | [additional\_users](#module\_additional\_users) | ./modules/mysql-user | n/a | -| [aurora\_mysql](#module\_aurora\_mysql) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [aurora\_mysql](#module\_aurora\_mysql) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -79,32 +92,31 @@ components: | Name | Type | |------|------| -| [mysql_database.additional](https://registry.terraform.io/providers/terraform-providers/mysql/latest/docs/resources/database) | resource | +| [mysql_database.additional](https://registry.terraform.io/providers/petoju/mysql/latest/docs/resources/database) | resource | +| [aws_ssm_parameter.admin_password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | | [aws_ssm_parameter.password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [additional\_databases](#input\_additional\_databases) | n/a | `set(string)` | `[]` | no | +| [additional\_databases](#input\_additional\_databases) | Additional databases to be created with the cluster | `set(string)` | `[]` | no | | [additional\_grants](#input\_additional\_grants) | Create additional database user with specified grants.
If `var.ssm_password_source` is set, passwords will be retrieved from SSM parameter store,
otherwise, passwords will be generated and stored in SSM parameter store under the service's key. |
map(list(object({
grant : list(string)
db : string
})))
| `{}` | no | | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | | [additional\_users](#input\_additional\_users) | Create additional database user for a service, specifying username, grants, and optional password.
If no password is specified, one will be generated. Username and password will be stored in
SSM parameter store under the service's key. |
map(object({
db_user : string
db_password : string
grants : list(object({
grant : list(string)
db : string
}))
}))
| `{}` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | -| [aurora\_mysql\_component\_name](#input\_aurora\_mysql\_component\_name) | Aurora MySQL component name to read the remote state from | `string` | n/a | yes | +| [aurora\_mysql\_component\_name](#input\_aurora\_mysql\_component\_name) | Aurora MySQL component name to read the remote state from | `string` | `"aurora-mysql"` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | -| [mysql\_admin\_password](#input\_mysql\_admin\_password) | MySQL password for the admin user | `string` | `""` | no | +| [mysql\_admin\_password](#input\_mysql\_admin\_password) | MySQL password for the admin user. If not provided, the password will be pulled from SSM | `string` | `""` | no | | [mysql\_cluster\_enabled](#input\_mysql\_cluster\_enabled) | Set to `false` to prevent the module from creating any resources | `string` | `true` | no | | [mysql\_db\_name](#input\_mysql\_db\_name) | Database name (default is not to create a database | `string` | `""` | no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | @@ -125,10 +137,11 @@ components: | [additional\_grants](#output\_additional\_grants) | Additional DB users created | | [additional\_users](#output\_additional\_users) | Additional DB users created | - + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/aurora-mysql-resources) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/aurora-mysql-resources) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/aurora-mysql-resources/main.tf b/modules/aurora-mysql-resources/main.tf index 6ed495bea..f53b112c9 100644 --- a/modules/aurora-mysql-resources/main.tf +++ b/modules/aurora-mysql-resources/main.tf @@ -8,9 +8,7 @@ locals { ssm_path_prefix = format("/%s/%s", var.ssm_path_prefix, module.aurora_mysql.outputs.aurora_mysql_cluster_id) ssm_password_source = length(var.ssm_password_source) > 0 ? var.ssm_password_source : format("%s/%s", local.ssm_path_prefix, "%s/password") - password_users_to_fetch = local.read_passwords_from_ssm ? toset(concat(["admin"], keys(var.additional_grants))) : [] - - mysql_admin_password = length(var.mysql_admin_password) > 0 ? var.mysql_admin_password : data.aws_ssm_parameter.password["admin"].value + password_users_to_fetch = local.read_passwords_from_ssm ? toset(keys(var.additional_grants)) : [] kms_key_arn = module.aurora_mysql.outputs.kms_key_arn } @@ -68,4 +66,3 @@ module "additional_grants" { context = module.this.context } - diff --git a/modules/aurora-mysql-resources/modules/mysql-user/default.auto.tfvars b/modules/aurora-mysql-resources/modules/mysql-user/default.auto.tfvars deleted file mode 100644 index 47f94fb9b..000000000 --- a/modules/aurora-mysql-resources/modules/mysql-user/default.auto.tfvars +++ /dev/null @@ -1 +0,0 @@ -enabled = false diff --git a/modules/aurora-mysql-resources/modules/mysql-user/main.tf b/modules/aurora-mysql-resources/modules/mysql-user/main.tf index 53be6e03b..70d071c08 100644 --- a/modules/aurora-mysql-resources/modules/mysql-user/main.tf +++ b/modules/aurora-mysql-resources/modules/mysql-user/main.tf @@ -4,7 +4,6 @@ locals { db_user = length(var.db_user) > 0 ? var.db_user : var.service_name db_password = length(var.db_password) > 0 ? var.db_password : join("", random_password.db_password.*.result) - create_db_user = local.enabled && var.service_name != local.db_user save_password_in_ssm = local.enabled && var.save_password_in_ssm db_password_key = format("%s/%s/passwords/%s", var.ssm_path_prefix, var.service_name, local.db_user) @@ -16,41 +15,13 @@ locals { overwrite = true } : null - parameter_write = (local.create_db_user && local.save_password_in_ssm) ? [local.db_password_ssm] : [] + parameter_write = local.save_password_in_ssm ? [local.db_password_ssm] : [] - # You cannot grant "ALL" to an RDS user because "ALL" includes privileges that - # Master does not have (because this is a managed database). + # You cannot grant "ALL" to an RDS user because "ALL" includes privileges that Master does not have (because this is a managed database). + # Instead, use "ALL PRIVILEGES" # See the full list of available options at https://docs.amazonaws.cn/en_us/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Security.html - # This is all the privileges an application should need. 
- # Privileges not listed are not available. - # Privileges commented out are dangerous or cannot be limited to 1 database and should not be needed by an app all_rds_app_grants = [ - "ALTER", - "ALTER ROUTINE", - "CREATE", - "CREATE ROUTINE", - "CREATE TEMPORARY TABLES", - # "CREATE USER", - "CREATE VIEW", - "DELETE", - "DROP", - "EVENT", - "EXECUTE", - # "GRANT OPTION", - "INDEX", - "INSERT", - "LOAD FROM S3", - "LOCK TABLES", - # "PROCESS", - "REFERENCES", - # "RELOAD", - # "REPLICATION CLIENT", - # "REPLICATION SLAVE", - "SELECT", - # "SHOW DATABASES", - "SHOW VIEW", - "TRIGGER", - "UPDATE" + "ALL PRIVILEGES" ] all_rds_other_grants = [ "CREATE USER", @@ -79,8 +50,6 @@ resource "mysql_user" "default" { user = local.db_user host = "%" plaintext_password = local.db_password - - depends_on = [var.instance_ids] } # Grant the user full access to this specific database @@ -100,10 +69,6 @@ resource "mysql_grant" "default" { )]) depends_on = [mysql_user.default] - # Apparently this is needed. See https://github.com/terraform-providers/terraform-provider-mysql/issues/55#issuecomment-615463296 - lifecycle { - create_before_destroy = true - } } module "parameter_store_write" { diff --git a/modules/aurora-mysql-resources/modules/mysql-user/variables.tf b/modules/aurora-mysql-resources/modules/mysql-user/variables.tf index c95f30442..8815038d0 100644 --- a/modules/aurora-mysql-resources/modules/mysql-user/variables.tf +++ b/modules/aurora-mysql-resources/modules/mysql-user/variables.tf @@ -46,4 +46,3 @@ variable "kms_key_id" { default = "alias/aws/rds" description = "KMS key ID, ARN, or alias to use for encrypting MySQL database" } - diff --git a/modules/aurora-mysql-resources/modules/mysql-user/versions.tf b/modules/aurora-mysql-resources/modules/mysql-user/versions.tf index 37e4284eb..c62bcb732 100644 --- a/modules/aurora-mysql-resources/modules/mysql-user/versions.tf +++ b/modules/aurora-mysql-resources/modules/mysql-user/versions.tf @@ -4,11 +4,11 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } mysql = { - source = "terraform-providers/mysql" - version = ">= 1.9" + source = "petoju/mysql" + version = ">= 3.0.22" } random = { source = "hashicorp/random" diff --git a/modules/aurora-mysql-resources/outputs.tf b/modules/aurora-mysql-resources/outputs.tf index 898890f8e..850a1c210 100644 --- a/modules/aurora-mysql-resources/outputs.tf +++ b/modules/aurora-mysql-resources/outputs.tf @@ -7,4 +7,3 @@ output "additional_grants" { value = keys(module.additional_grants) description = "Additional DB users created" } - diff --git a/modules/aurora-mysql-resources/provider-mysql.tf b/modules/aurora-mysql-resources/provider-mysql.tf new file mode 100644 index 000000000..c3af1b0a9 --- /dev/null +++ b/modules/aurora-mysql-resources/provider-mysql.tf @@ -0,0 +1,31 @@ +variable "mysql_admin_password" { + type = string + description = "MySQL password for the admin user. If not provided, the password will be pulled from SSM" + default = "" +} + +locals { + cluster_endpoint = module.aurora_mysql.outputs.aurora_mysql_endpoint + + mysql_admin_user = module.aurora_mysql.outputs.aurora_mysql_master_username + mysql_admin_password_key = module.aurora_mysql.outputs.aurora_mysql_master_password_ssm_key + mysql_admin_password = local.enabled ? (length(var.mysql_admin_password) > 0 ? 
var.mysql_admin_password : data.aws_ssm_parameter.admin_password[0].value) : "" +} + +data "aws_ssm_parameter" "admin_password" { + count = local.enabled && !(length(var.mysql_admin_password) > 0) ? 1 : 0 + + name = local.mysql_admin_password_key + + with_decryption = true +} + +provider "mysql" { + endpoint = local.cluster_endpoint + username = local.mysql_admin_user + password = local.mysql_admin_password + + # Useful for debugging provider + # https://github.com/petoju/terraform-provider-mysql/blob/master/mysql/provider.go + connect_retry_timeout_sec = 60 +} diff --git a/modules/aurora-mysql-resources/providers.tf b/modules/aurora-mysql-resources/providers.tf index 93af053cb..ef923e10a 100644 --- a/modules/aurora-mysql-resources/providers.tf +++ b/modules/aurora-mysql-resources/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,21 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} - -provider "mysql" { - endpoint = module.aurora_mysql.outputs.aurora_mysql_endpoint - username = module.aurora_mysql.outputs.aurora_mysql_master_username - password = local.mysql_admin_password -} diff --git a/modules/aurora-mysql-resources/remote-state.tf b/modules/aurora-mysql-resources/remote-state.tf index e31385bac..33f457aca 100644 --- a/modules/aurora-mysql-resources/remote-state.tf +++ b/modules/aurora-mysql-resources/remote-state.tf @@ -1,6 +1,6 @@ module "aurora_mysql" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = var.aurora_mysql_component_name diff --git a/modules/aurora-mysql-resources/variables.tf b/modules/aurora-mysql-resources/variables.tf index 8e864b122..816f1bccd 100644 --- a/modules/aurora-mysql-resources/variables.tf +++ b/modules/aurora-mysql-resources/variables.tf @@ -6,6 +6,7 @@ variable "region" { variable "aurora_mysql_component_name" { type = string description = "Aurora MySQL component name to read the remote state from" + default = "aurora-mysql" } variable "read_passwords_from_ssm" { @@ -28,12 +29,6 @@ variable "ssm_password_source" { EOT } -variable "mysql_admin_password" { - type = string - description = "MySQL password for the admin user" - default = "" -} - variable "mysql_db_name" { type = string description = "Database name (default is not to create a database" default = "" @@ -47,8 +42,9 @@ variable "mysql_cluster_enabled" { } variable "additional_databases" { - type = set(string) - default = [] + type = set(string) + default = [] + description = "Additional databases to be created with the cluster" } variable
"additional_users" { @@ -82,4 +78,3 @@ variable "additional_grants" { otherwise, passwords will be generated and stored in SSM parameter store under the service's key. EOT } - diff --git a/modules/aurora-mysql-resources/versions.tf b/modules/aurora-mysql-resources/versions.tf index 194539462..bfae21c8d 100644 --- a/modules/aurora-mysql-resources/versions.tf +++ b/modules/aurora-mysql-resources/versions.tf @@ -4,11 +4,14 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } + # terraform-providers/mysql is archived + # https://github.com/hashicorp/terraform-provider-mysql + # replacing with petoju/terraform-provider-mysql mysql = { - source = "terraform-providers/mysql" - version = ">= 1.9" + source = "petoju/mysql" + version = ">= 3.0.22" } } } diff --git a/modules/aurora-mysql/README.md b/modules/aurora-mysql/README.md index e048aa0df..169d9451a 100644 --- a/modules/aurora-mysql/README.md +++ b/modules/aurora-mysql/README.md @@ -1,7 +1,14 @@ +--- +tags: + - component/aurora-mysql + - layer/data + - provider/aws +--- + # Component: `aurora-mysql` -This component is responsible for provisioning Aurora MySQL RDS clusters. -It seeds relevant database information (hostnames, username, password, etc.) into AWS SSM Parameter Store. +This component is responsible for provisioning Aurora MySQL RDS clusters. It seeds relevant database information +(hostnames, username, password, etc.) into AWS SSM Parameter Store. ## Usage @@ -73,22 +80,27 @@ components: - aurora-mysql/defaults vars: instance_type: db.r5.large - cluster_size: 1 - cluster_name: main - database_name: main + mysql_cluster_size: 1 + mysql_name: main + mysql_db_name: main ``` -Example deployment with primary cluster deployed to us-east-1 in a `platform-dev` account: `atmos terraform apply aurora-mysql/dev -s platform-use1-dev` +Example deployment with primary cluster deployed to us-east-1 in a `platform-dev` account: +`atmos terraform apply aurora-mysql/dev -s platform-use1-dev` ## Disaster Recovery with Cross-Region Replication -This component is designed to support cross-region replication with continuous replication. If enabled and deployed, a secondary cluster will be deployed in a different region than the primary cluster. This approach is highly aggresive and costly, but in a disaster scenario where the primary cluster fails, the secondary cluster can be promoted to take its place. Follow these steps to handle a Disaster Recovery. +This component is designed to support cross-region replication with continuous replication. If enabled and deployed, a +secondary cluster will be deployed in a different region than the primary cluster. This approach is highly aggressive and +costly, but in a disaster scenario where the primary cluster fails, the secondary cluster can be promoted to take its +place. Follow these steps to handle a Disaster Recovery. ### Usage To deploy a secondary cluster for cross-region replication, add the following catalog entries to an alternative region: -Default settings for a secondary, replica cluster. For this example, this file is saved as `stacks/catalog/aurora-mysql/replica/defaults.yaml` +Default settings for a secondary, replica cluster. 
For this example, this file is saved as +`stacks/catalog/aurora-mysql/replica/defaults.yaml` ```yaml import: @@ -106,7 +118,7 @@ components: allowed_cidr_blocks: # all automation in primary region (where Spacelift is deployed) - 10.128.0.0/22 - # all corp in the same region as this cluster + # all corp in the same region as this cluster - 10.132.16.0/22 mysql_instance_type: "db.t3.medium" mysql_name: "replica" @@ -136,34 +148,37 @@ components: ### Promoting the Read Replica -Promoting an existing RDS Replicate cluster to a fully standalone cluster is not currently supported by Terraform: https://github.com/hashicorp/terraform-provider-aws/issues/6749 +Promoting an existing RDS Replicate cluster to a fully standalone cluster is not currently supported by Terraform: +https://github.com/hashicorp/terraform-provider-aws/issues/6749 -Instead, promote the Replicate cluster with the AWS CLI command: `aws rds promote-read-replica-db-cluster --db-cluster-identifier ` +Instead, promote the Replicate cluster with the AWS CLI command: +`aws rds promote-read-replica-db-cluster --db-cluster-identifier ` -After promoting the replica, update the stack configuration to prevent future Terrafrom runs from re-enabling replication. In this example, modify `stacks/catalog/aurora-mysql/replica/defaults.yaml` +After promoting the replica, update the stack configuration to prevent future Terraform runs from re-enabling +replication. In this example, modify `stacks/catalog/aurora-mysql/replica/defaults.yaml` ```yaml is_promoted_read_replica: true ``` -Reploying the component should show no changes. For example, `atmos terraform apply aurora-mysql/dev -s platform-use2-dev` - +Redeploying the component should show no changes. For example, +`atmos terraform apply aurora-mysql/dev -s platform-use2-dev` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | -| [mysql](#requirement\_mysql) | >= 1.9 | +| [aws](#requirement\_aws) | >= 4.0 | | [random](#requirement\_random) | >= 2.2 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | +| [aws](#provider\_aws) | >= 4.0 | | [random](#provider\_random) | >= 2.2 | ## Modules @@ -172,14 +187,15 @@ Reploying the component should show no changes.
For example, `atmos terraform ap |------|--------|---------| | [aurora\_mysql](#module\_aurora\_mysql) | cloudposse/rds-cluster/aws | 1.3.1 | | [cluster](#module\_cluster) | cloudposse/label/null | 0.25.0 | -| [dns-delegated](#module\_dns-delegated) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [dns-delegated](#module\_dns-delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [kms\_key\_rds](#module\_kms\_key\_rds) | cloudposse/kms-key/aws | 0.12.1 | -| [parameter\_store\_write](#module\_parameter\_store\_write) | cloudposse/ssm-parameter-store/aws | 0.10.0 | -| [primary\_cluster](#module\_primary\_cluster) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [parameter\_store\_write](#module\_parameter\_store\_write) | cloudposse/ssm-parameter-store/aws | 0.11.0 | +| [primary\_cluster](#module\_primary\_cluster) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [vpc\_ingress](#module\_vpc\_ingress) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -198,6 +214,7 @@ Reploying the component should show no changes. For example, `atmos terraform ap | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [allow\_ingress\_from\_vpc\_accounts](#input\_allow\_ingress\_from\_vpc\_accounts) | List of account contexts to pull VPC ingress CIDR and add to cluster security group.

e.g.
{
environment = "ue2",
stage = "auto",
tenant = "core"
}

Defaults to the "vpc" component in the given account |
list(object({
vpc = optional(string, "vpc")
environment = optional(string)
stage = optional(string)
tenant = optional(string)
}))
| `[]` | no | | [allowed\_cidr\_blocks](#input\_allowed\_cidr\_blocks) | List of CIDR blocks to be allowed to connect to the RDS cluster | `list(string)` | `[]` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | | [aurora\_mysql\_cluster\_family](#input\_aurora\_mysql\_cluster\_family) | DBParameterGroupFamily (e.g. `aurora5.6`, `aurora-mysql5.7` for Aurora MySQL databases). See https://stackoverflow.com/a/55819394 for help finding the right one to use. | `string` | n/a | yes | @@ -213,8 +230,6 @@ Reploying the component should show no changes. For example, `atmos terraform ap | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [is\_promoted\_read\_replica](#input\_is\_promoted\_read\_replica) | If `true`, do not assign a Replication Source to the Cluster. Set to `true` after manually promoting the cluster from a replica to a standalone cluster. | `bool` | `false` | no | | [is\_read\_replica](#input\_is\_read\_replica) | If `true`, create this DB cluster as a Read Replica. | `bool` | `false` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | @@ -239,15 +254,16 @@ Reploying the component should show no changes. For example, `atmos terraform ap | [performance\_insights\_enabled](#input\_performance\_insights\_enabled) | Set `true` to enable Performance Insights | `bool` | `false` | no | | [primary\_cluster\_component](#input\_primary\_cluster\_component) | If this cluster is a read replica and no replication source is explicitly given, the component name for the primary cluster | `string` | `"aurora-mysql"` | no | | [primary\_cluster\_region](#input\_primary\_cluster\_region) | If this cluster is a read replica and no replication source is explicitly given, the region to look for a matching cluster | `string` | `""` | no | -| [publicly\_accessible](#input\_publicly\_accessible) | n/a | `bool` | `false` | no | +| [publicly\_accessible](#input\_publicly\_accessible) | Set to true to create the cluster in a public subnet | `bool` | `false` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | -| [replication\_source\_identifier](#input\_replication\_source\_identifier) | ARN of a source DB cluster or DB instance if this DB cluster is to be created as a Read Replica.
If this value is empty and replication is enabled, remote state will attempt to find
a matching cluster in the Primary DB Cluster's region | `string` | `""` | no | -| [ssm\_password\_source](#input\_ssm\_password\_source) | If `var.ssm_passwords_enabled` is `true`, DB user passwords will be retrieved from SSM using
`var.ssm_password_source` and the database username. If this value is not set,
a default path will be created using the SSM path prefix and ID of the associated Aurora Cluster. | `string` | `""` | no | +| [replication\_source\_identifier](#input\_replication\_source\_identifier) | ARN of a source DB cluster or DB instance if this DB cluster is to be created as a Read Replica.
If this value is empty and replication is enabled, remote state will attempt to find
a matching cluster in the Primary DB Cluster's region | `string` | `""` | no | +| [ssm\_password\_source](#input\_ssm\_password\_source) | If `var.ssm_passwords_enabled` is `true`, DB user passwords will be retrieved from SSM using
`var.ssm_password_source` and the database username. If this value is not set,
a default path will be created using the SSM path prefix and ID of the associated Aurora Cluster. | `string` | `""` | no | | [ssm\_path\_prefix](#input\_ssm\_path\_prefix) | SSM path prefix | `string` | `"rds"` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [vpc\_component\_name](#input\_vpc\_component\_name) | The name of the VPC component | `string` | `"vpc"` | no | ## Outputs @@ -266,10 +282,11 @@ Reploying the component should show no changes. For example, `atmos terraform ap | [cluster\_domain](#output\_cluster\_domain) | Cluster DNS name | | [kms\_key\_arn](#output\_kms\_key\_arn) | KMS key ARN for Aurora MySQL | - + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/aurora-mysql) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/aurora-mysql) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/aurora-mysql/cluster-regional.tf b/modules/aurora-mysql/cluster-regional.tf index 109a03e48..d09d90390 100644 --- a/modules/aurora-mysql/cluster-regional.tf +++ b/modules/aurora-mysql/cluster-regional.tf @@ -21,7 +21,7 @@ module "aurora_mysql" { vpc_id = local.vpc_id publicly_accessible = var.publicly_accessible subnets = var.publicly_accessible ? local.public_subnet_ids : local.private_subnet_ids - allowed_cidr_blocks = var.publicly_accessible ? coalescelist(var.allowed_cidr_blocks, ["0.0.0.0/0"]) : var.allowed_cidr_blocks + allowed_cidr_blocks = local.allowed_cidr_blocks security_groups = local.eks_cluster_managed_security_group_ids zone_id = local.zone_id diff --git a/modules/aurora-mysql/default.auto.tfvars b/modules/aurora-mysql/default.auto.tfvars deleted file mode 100644 index 47f94fb9b..000000000 --- a/modules/aurora-mysql/default.auto.tfvars +++ /dev/null @@ -1 +0,0 @@ -enabled = false diff --git a/modules/aurora-mysql/main.tf b/modules/aurora-mysql/main.tf index 2c1f0dac0..097d6f5c6 100644 --- a/modules/aurora-mysql/main.tf +++ b/modules/aurora-mysql/main.tf @@ -14,15 +14,15 @@ locals { is_read_replica = local.enabled && var.is_read_replica remote_read_replica_enabled = local.is_read_replica && !(length(var.replication_source_identifier) > 0) && length(var.primary_cluster_region) > 0 - # Removing the replicate source attribute from an existing RDS Replicate database managed by Terraform - # should promote the database to a fully standalone database but currently is not supported by Terraform. + # Removing the replicate source attribute from an existing RDS Replicate database managed by Terraform + # should promote the database to a fully standalone database but currently is not supported by Terraform. # Instead, first manually promote with the AWS CLI or console, and then remove the replication source identitier from the Terrafrom state # See https://github.com/hashicorp/terraform-provider-aws/issues/6749 replication_source_identifier = local.remote_read_replica_enabled && !var.is_promoted_read_replica ? module.primary_cluster[0].outputs.aurora_mysql_cluster_arn : var.replication_source_identifier # For encrypted cross-region replica, kmsKeyId should be explicitly specified # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Encryption.html - # A read replica of an Amazon RDS encrypted instance must be encrypted using the same KMS key as the primary DB instance when both are in the same AWS Region. 
+ # A read replica of an Amazon RDS encrypted instance must be encrypted using the same KMS key as the primary DB instance when both are in the same AWS Region. # If the primary DB instance and read replica are in different AWS Regions, you encrypt the read replica using a KMS key in that AWS Region. kms_key_arn = module.kms_key_rds.key_arn @@ -37,6 +37,18 @@ locals { cluster_domain = trimprefix(module.aurora_mysql.endpoint, "${module.aurora_mysql.cluster_identifier}.cluster-") cluster_subdomain = var.mysql_name == "" ? module.this.name : "${var.mysql_name}.${module.this.name}" + + # Join a list of all allowed cidr blocks from: + # 1. VPCs from all given accounts + # 2. Additionally given CIDR blocks + all_allowed_cidr_blocks = concat( + var.allowed_cidr_blocks, + [ + for k in keys(module.vpc_ingress) : + module.vpc_ingress[k].outputs.vpc_cidr + ] + ) + allowed_cidr_blocks = var.publicly_accessible ? coalescelist(local.all_allowed_cidr_blocks, ["0.0.0.0/0"]) : local.all_allowed_cidr_blocks } module "cluster" { @@ -70,5 +82,3 @@ resource "random_pet" "mysql_db_name" { db_name = var.mysql_db_name } } - - diff --git a/modules/aurora-mysql/outputs.tf b/modules/aurora-mysql/outputs.tf index 319b4aa68..eafb28aeb 100644 --- a/modules/aurora-mysql/outputs.tf +++ b/modules/aurora-mysql/outputs.tf @@ -59,4 +59,3 @@ output "kms_key_arn" { value = module.kms_key_rds.key_arn description = "KMS key ARN for Aurora MySQL" } - diff --git a/modules/aurora-mysql/providers.tf b/modules/aurora-mysql/providers.tf index a989bcb9c..ef923e10a 100644 --- a/modules/aurora-mysql/providers.tf +++ b/modules/aurora-mysql/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,21 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} - -provider "mysql" { - endpoint = time_sleep.mysql_cluster_propagation[0].triggers["endpoint"] - username = time_sleep.mysql_cluster_propagation[0].triggers["username"] - password = local.mysql_admin_password -} diff --git a/modules/aurora-mysql/remote-state.tf b/modules/aurora-mysql/remote-state.tf index f54100557..5f8201069 100644 --- a/modules/aurora-mysql/remote-state.tf +++ b/modules/aurora-mysql/remote-state.tf @@ -1,6 +1,10 @@ +locals { + accounts_with_vpc = { for i, account in var.allow_ingress_from_vpc_accounts : try(account.tenant, module.this.tenant) != null ? 
format("%s-%s", account.tenant, account.stage) : account.stage => account } +} + module "dns-delegated" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = "dns-delegated" environment = "gbl" @@ -10,7 +14,7 @@ module "dns-delegated" { module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" for_each = var.eks_component_names @@ -21,16 +25,31 @@ module "eks" { module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" + + component = var.vpc_component_name + + context = module.this.context +} + +module "vpc_ingress" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" - component = "vpc" + for_each = local.accounts_with_vpc + + component = each.value.vpc + environment = try(each.value.environment, module.this.environment) + stage = try(each.value.stage, module.this.environment) + tenant = try(each.value.tenant, module.this.tenant) context = module.this.context } + module "primary_cluster" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" count = local.remote_read_replica_enabled ? 1 : 0 diff --git a/modules/aurora-mysql/ssm.tf b/modules/aurora-mysql/ssm.tf index 8dbf1532f..34db56720 100644 --- a/modules/aurora-mysql/ssm.tf +++ b/modules/aurora-mysql/ssm.tf @@ -26,13 +26,6 @@ locals { type = "String" overwrite = true }, - { - name = format("%s/%s", local.ssm_path_prefix, "replicas_hostname") - value = module.aurora_mysql.replicas_host - description = "Aurora MySQL DB Replicas hostname" - type = "String" - overwrite = true - }, { name = format("%s/%s", local.ssm_path_prefix, "cluster_name") value = module.aurora_mysql.cluster_identifier @@ -41,6 +34,15 @@ locals { overwrite = true } ] + cluster_parameters = var.mysql_cluster_size > 0 ? [ + { + name = format("%s/%s", local.ssm_path_prefix, "replicas_hostname") + value = module.aurora_mysql.replicas_host + description = "Aurora MySQL DB Replicas hostname" + type = "String" + overwrite = true + }, + ] : [] admin_user_parameters = [ { name = local.mysql_admin_user_key @@ -58,7 +60,7 @@ locals { } ] - parameter_write = local.mysql_db_enabled ? concat(local.default_parameters, local.admin_user_parameters) : local.default_parameters + parameter_write = local.mysql_db_enabled ? concat(local.default_parameters, local.cluster_parameters, local.admin_user_parameters) : concat(local.default_parameters, local.cluster_parameters) } data "aws_ssm_parameter" "password" { @@ -71,13 +73,12 @@ data "aws_ssm_parameter" "password" { module "parameter_store_write" { source = "cloudposse/ssm-parameter-store/aws" - version = "0.10.0" + version = "0.11.0" # kms_arn will only be used for SecureString parameters kms_arn = module.kms_key_rds.key_arn parameter_write = local.parameter_write - context = module.this.context + context = module.cluster.context } - diff --git a/modules/aurora-mysql/variables.tf b/modules/aurora-mysql/variables.tf index 87f88c73e..3297da7d5 100644 --- a/modules/aurora-mysql/variables.tf +++ b/modules/aurora-mysql/variables.tf @@ -13,8 +13,8 @@ variable "ssm_password_source" { type = string default = "" description = <<-EOT - If `var.ssm_passwords_enabled` is `true`, DB user passwords will be retrieved from SSM using - `var.ssm_password_source` and the database username. 
If this value is not set, + If `var.ssm_passwords_enabled` is `true`, DB user passwords will be retrieved from SSM using + `var.ssm_password_source` and the database username. If this value is not set, a default path will be created using the SSM path prefix and ID of the associated Aurora Cluster. EOT } @@ -153,8 +153,9 @@ variable "auto_minor_version_upgrade" { } variable "publicly_accessible" { - type = bool - default = false + type = bool + default = false + description = "Set to true to create the cluster in a public subnet" } variable "eks_component_names" { @@ -166,8 +167,8 @@ variable "eks_component_names" { variable "replication_source_identifier" { type = string description = <<-EOT - ARN of a source DB cluster or DB instance if this DB cluster is to be created as a Read Replica. - If this value is empty and replication is enabled, remote state will attempt to find + ARN of a source DB cluster or DB instance if this DB cluster is to be created as a Read Replica. + If this value is empty and replication is enabled, remote state will attempt to find a matching cluster in the Primary DB Cluster's region EOT default = "" @@ -197,3 +198,30 @@ variable "primary_cluster_component" { default = "aurora-mysql" } +variable "allow_ingress_from_vpc_accounts" { + type = list(object({ + vpc = optional(string, "vpc") + environment = optional(string) + stage = optional(string) + tenant = optional(string) + })) + default = [] + description = <<-EOF + List of account contexts to pull VPC ingress CIDR and add to cluster security group. + + e.g. + { + environment = "ue2", + stage = "auto", + tenant = "core" + } + + Defaults to the "vpc" component in the given account + EOF +} + +variable "vpc_component_name" { + type = string + default = "vpc" + description = "The name of the VPC component" +} diff --git a/modules/aurora-mysql/versions.tf b/modules/aurora-mysql/versions.tf index 37e4284eb..06ec5fbfa 100644 --- a/modules/aurora-mysql/versions.tf +++ b/modules/aurora-mysql/versions.tf @@ -4,11 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" - } - mysql = { - source = "terraform-providers/mysql" - version = ">= 1.9" + version = ">= 4.0" } random = { source = "hashicorp/random" diff --git a/modules/aurora-postgres-resources/README.md b/modules/aurora-postgres-resources/README.md index a3cefaab4..58e3c3ead 100644 --- a/modules/aurora-postgres-resources/README.md +++ b/modules/aurora-postgres-resources/README.md @@ -1,6 +1,14 @@ +--- +tags: + - component/aurora-postgres-resources + - layer/data + - provider/aws +--- + # Component: `aurora-postgres-resources` -This component is responsible for provisioning Aurora Postgres resources: additional databases, users, permissions, grants, etc. +This component is responsible for provisioning Aurora Postgres resources: additional databases, users, permissions, +grants, etc. ## Usage @@ -19,37 +27,77 @@ components: db_user: example db_password: "" grants: - - grant: [ "ALL" ] + - grant: ["ALL"] db: example object_type: database - schema: null + schema: "" ``` +## PostgreSQL Quick Reference on Grants + +GRANTS can be on database, schema, role, table, and other database objects (e.g. columns in a table for fine control). +Database and schema do not have much to grant. The `object_type` field in the input determines which kind of object the +grant is being applied to. The `db` field is always required. 
The `schema` field is required unless the `object_type` is +`db`, in which case it should be set to the empty string (`""`). + +The keyword PUBLIC indicates that the privileges are to be granted to all roles, including those that might be created +later. PUBLIC can be thought of as an implicitly defined group that always includes all roles. Any particular role will +have the sum of privileges granted directly to it, privileges granted to any role it is presently a member of, and +privileges granted to PUBLIC. + +When an object is created, it is assigned an owner. The owner is normally the role that executed the creation statement. +For most kinds of objects, the initial state is that only the owner (or a superuser) can do anything with the object. To +allow other roles to use it, privileges must be granted. (When using AWS managed RDS, you cannot have access to any +superuser roles; superuser is reserved for AWS to use to manage the cluster.) + +PostgreSQL grants privileges on some types of objects to PUBLIC by default when the objects are created. No privileges +are granted to PUBLIC by default on tables, table columns, sequences, foreign data wrappers, foreign servers, large +objects, schemas, or tablespaces. For other types of objects, the default privileges granted to PUBLIC are as follows: +CONNECT and TEMPORARY (create temporary tables) privileges for databases; EXECUTE privilege for functions and +procedures; and USAGE privilege for languages and data types (including domains). The object owner can, of course, +REVOKE both default and expressly granted privileges. (For maximum security, issue the REVOKE in the same transaction +that creates the object; then there is no window in which another user can use the object.) Also, these default +privilege settings can be overridden using the ALTER DEFAULT PRIVILEGES command. + +The CREATE privilege: + +- For databases, allows new schemas and publications to be created within the database, and allows trusted extensions to + be installed within the database. +- For schemas, allows new objects to be created within the schema. To rename an existing object, you must own the object + and have this privilege for the containing schema. + +For databases and schemas, there are not a lot of other privileges to grant, and all but CREATE are granted by default, +so you might as well grant "ALL". For tables etc., the creator has full control. You grant access to other users via +explicit grants. This component does not allow fine-grained grants. You have to specify the database, and unless the +grant is on the database, you have to specify the schema. For any other object type (table, sequence, function, +procedure, routine, foreign_data_wrapper, foreign_server, column), the component applies the grants to all objects of +that type in the specified schema. 
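+As an illustration of how these rules map onto this component's inputs, here is a hypothetical stack configuration
+(the component key `aurora-postgres-resources`, service name `example_service`, database `example`, and schema `app`
+are placeholders for illustration, not taken from the upstream examples). Database-level grants leave `schema` empty,
+while schema- and table-level grants name the schema they apply to:
+
+```yaml
+components:
+  terraform:
+    aurora-postgres-resources:
+      vars:
+        additional_users:
+          example_service:
+            db_user: example
+            db_password: "" # empty: a random password is generated and stored in SSM under the service's key
+            grants:
+              # Database-level grant: object_type "database", schema left as the empty string
+              - grant: ["CONNECT", "CREATE", "TEMPORARY"]
+                db: example
+                object_type: database
+                schema: ""
+              # Schema-level grant: applies to the named schema
+              - grant: ["USAGE", "CREATE"]
+                db: example
+                object_type: schema
+                schema: app
+              # Table-level grant: applied to all tables in the named schema
+              - grant: ["SELECT"]
+                db: example
+                object_type: table
+                schema: app
+```
+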
+ + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | >= 3.0 | -| [postgresql](#requirement\_postgresql) | >= 1.11.2 | +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | +| [postgresql](#requirement\_postgresql) | >= 1.17.1 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.0 | -| [postgresql](#provider\_postgresql) | >= 1.11.2 | +| [aws](#provider\_aws) | >= 4.9.0 | +| [postgresql](#provider\_postgresql) | >= 1.17.1 | ## Modules | Name | Source | Version | |------|--------|---------| +| [additional\_grants](#module\_additional\_grants) | ./modules/postgresql-user | n/a | | [additional\_users](#module\_additional\_users) | ./modules/postgresql-user | n/a | -| [aurora\_postgres](#module\_aurora\_postgres) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.3 | +| [aurora\_postgres](#module\_aurora\_postgres) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [read\_only\_cluster\_user](#module\_read\_only\_cluster\_user) | ./modules/postgresql-user | n/a | -| [read\_only\_db\_users](#module\_read\_only\_db\_users) | ./modules/postgresql-user | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources @@ -57,35 +105,41 @@ components: | Name | Type | |------|------| | [postgresql_database.additional](https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs/resources/database) | resource | -| [postgresql_default_privileges.read_only_tables_cluster](https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs/resources/default_privileges) | resource | -| [postgresql_default_privileges.read_only_tables_users](https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs/resources/default_privileges) | resource | +| [postgresql_schema.additional](https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs/resources/schema) | resource | | [aws_ssm_parameter.admin_password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [additional\_databases](#input\_additional\_databases) | Define additional databases to create. | `set(string)` | `[]` | no | +| [additional\_databases](#input\_additional\_databases) | Additional databases to be created with the cluster | `set(string)` | `[]` | no | +| [additional\_grants](#input\_additional\_grants) | Create additional database user with specified grants.
If `var.ssm_password_source` is set, passwords will be retrieved from SSM parameter store,
otherwise, passwords will be generated and stored in SSM parameter store under the service's key. |
map(list(object({
grant : list(string)
db : string
})))
| `{}` | no | +| [additional\_schemas](#input\_additional\_schemas) | Create additional schemas for a given database.
If no database is given, the schema will use the database used by the provider configuration |
map(object({
database : string
}))
| `{}` | no | | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | -| [additional\_users](#input\_additional\_users) | Define additional users to create. |
map(object({
db_user : string
db_password : string
grants : list(object({
grant : list(string)
db : string
schema : string
object_type : string
}))
}))
| `{}` | no | +| [additional\_users](#input\_additional\_users) | Create additional database user for a service, specifying username, grants, and optional password.
If no password is specified, one will be generated. Username and password will be stored in
SSM parameter store under the service's key. |
map(object({
db_user : string
db_password : string
grants : list(object({
grant : list(string)
db : string
schema : string
object_type : string
}))
}))
| `{}` | no | +| [admin\_password](#input\_admin\_password) | postgresql password for the admin user | `string` | `""` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | -| [aurora\_postgres\_component\_name](#input\_aurora\_postgres\_component\_name) | Aurora Postgres component name to read the remote state from | `string` | n/a | yes | +| [aurora\_postgres\_component\_name](#input\_aurora\_postgres\_component\_name) | Aurora Postgres component name to read the remote state from | `string` | `"aurora-postgres"` | no | +| [cluster\_enabled](#input\_cluster\_enabled) | Set to `false` to prevent the module from creating any resources | `string` | `true` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [db\_name](#input\_db\_name) | Database name (default is not to create a database) | `string` | `""` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [read\_passwords\_from\_ssm](#input\_read\_passwords\_from\_ssm) | When `true`, fetch user passwords from SSM | `bool` | `true` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [ssm\_password\_source](#input\_ssm\_password\_source) | If var.read\_passwords\_from\_ssm is true, DB user passwords will be retrieved from SSM using `var.ssm_password_source` and the database username. If this value is not set, a default path will be created using the SSM path prefix and ID of the associated Aurora Cluster. | `string` | `""` | no | +| [ssm\_path\_prefix](#input\_ssm\_path\_prefix) | SSM path prefix | `string` | `"aurora-postgres"` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | @@ -95,13 +149,19 @@ components: | Name | Description | |------|-------------| | [additional\_databases](#output\_additional\_databases) | Additional databases | +| [additional\_grants](#output\_additional\_grants) | Additional grants | +| [additional\_schemas](#output\_additional\_schemas) | Additional schemas | | [additional\_users](#output\_additional\_users) | Additional users | -| [read\_only\_users](#output\_read\_only\_users) | Read-only users | - + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/aurora-postgres-resources) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/aurora-postgres-resources) - + Cloud Posse's upstream component + +- PostgreSQL references (select the correct version of PostgreSQL at the top of the page): + - [GRANT command](https://www.postgresql.org/docs/14/sql-grant.html) + - [Privileges that can be GRANTed](https://www.postgresql.org/docs/14/ddl-priv.html) [](https://cpco.io/component) diff --git a/modules/aurora-postgres-resources/additional-databases.tf b/modules/aurora-postgres-resources/additional-databases.tf deleted file mode 100644 index 370a24120..000000000 --- a/modules/aurora-postgres-resources/additional-databases.tf +++ /dev/null @@ -1,4 +0,0 @@ -resource "postgresql_database" "additional" { - for_each = local.enabled ? var.additional_databases : [] - name = each.key -} diff --git a/modules/aurora-postgres-resources/additional-users.tf b/modules/aurora-postgres-resources/additional-users.tf deleted file mode 100644 index 3dc02cbff..000000000 --- a/modules/aurora-postgres-resources/additional-users.tf +++ /dev/null @@ -1,19 +0,0 @@ -module "additional_users" { - source = "./modules/postgresql-user" - - for_each = var.additional_users - - enabled = local.enabled - - service_name = each.key - db_user = each.value.db_user - db_password = each.value.db_password - grants = each.value.grants - ssm_path_prefix = join("/", compact([local.ssm_path_prefix, local.cluster_name, "service"])) - - context = module.this.context - - depends_on = [ - postgresql_database.additional - ] -} diff --git a/modules/aurora-postgres-resources/default.auto.tfvars b/modules/aurora-postgres-resources/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/aurora-postgres-resources/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/aurora-postgres-resources/main.tf b/modules/aurora-postgres-resources/main.tf index 15c4bfdf5..2144e4e25 100644 --- a/modules/aurora-postgres-resources/main.tf +++ b/modules/aurora-postgres-resources/main.tf @@ -1,18 +1,77 @@ locals { enabled = module.this.enabled - cluster_endpoint = try(module.aurora_postgres.outputs.primary_aurora_postgres_master_endpoint, module.aurora_postgres.outputs.endpoint) - cluster_name = try(module.aurora_postgres.outputs.primary_aurora_postgres_cluster_identifier, null) - database_name = try(module.aurora_postgres.outputs.aurora_postgres_database_name, module.aurora_postgres.outputs.database_name) - admin_user = 
try(module.aurora_postgres.outputs.aurora_postgres_admin_username, module.aurora_postgres.outputs.admin_username) - ssm_path_prefix = try(module.aurora_postgres.outputs.aurora_postgres_ssm_path_prefix, module.aurora_postgres.outputs.ssm_cluster_key_prefix) - admin_password_ssm_parameter = try(module.aurora_postgres.outputs.aurora_postgres_master_password_ssm_key, module.aurora_postgres.outputs.config_map.password_ssm_key) - admin_password = join("", data.aws_ssm_parameter.admin_password[*].value) + # If pulling passwords from SSM, determine the SSM path for passwords for each user + # example SSM password source: /rds/acme-platform-use1-dev-rds-shared/%s/password + read_passwords_from_ssm = local.enabled && var.read_passwords_from_ssm + password_users_to_fetch = local.read_passwords_from_ssm ? toset(keys(var.additional_grants)) : [] + ssm_path_prefix = format("/%s/%s", var.ssm_path_prefix, module.aurora_postgres.outputs.cluster_identifier) + ssm_password_source = length(var.ssm_password_source) > 0 ? var.ssm_password_source : format("%s/%s", local.ssm_path_prefix, "%s/password") + + kms_key_arn = module.aurora_postgres.outputs.kms_key_arn + + default_schema_owner = "postgres" } -data "aws_ssm_parameter" "admin_password" { - count = local.enabled ? 1 : 0 +data "aws_ssm_parameter" "password" { + for_each = local.password_users_to_fetch + + name = format(local.ssm_password_source, each.key) - name = local.admin_password_ssm_parameter with_decryption = true } + +resource "postgresql_database" "additional" { + for_each = local.enabled ? var.additional_databases : [] + + name = each.key +} + +resource "postgresql_schema" "additional" { + for_each = local.enabled ? var.additional_schemas : {} + + name = each.key + database = try(each.value.database, null) # If null, the database used by your provider configuration +} + +module "additional_users" { + for_each = local.enabled ? var.additional_users : {} + source = "./modules/postgresql-user" + + service_name = each.key + db_user = each.value.db_user + db_password = each.value.db_password + grants = each.value.grants + ssm_path_prefix = local.ssm_path_prefix + kms_key_id = local.kms_key_arn + + depends_on = [ + postgresql_database.additional, + postgresql_schema.additional, + ] + + context = module.this.context +} + +module "additional_grants" { + for_each = var.additional_grants + source = "./modules/postgresql-user" + + service_name = each.key + grants = each.value + kms_key_id = local.kms_key_arn + + # If `read_passwords_from_ssm` is true, that means passwords already exist in SSM + # If no password is given, a random password will be created + db_password = local.read_passwords_from_ssm ? data.aws_ssm_parameter.password[each.key].value : "" + # If generating a password, store it in SSM. Otherwise, we don't need to save an existing password in SSM + save_password_in_ssm = local.read_passwords_from_ssm ? false : true + ssm_path_prefix = local.ssm_path_prefix + + depends_on = [ + postgresql_database.additional, + postgresql_schema.additional, + ] + + context = module.this.context +} diff --git a/modules/aurora-postgres-resources/modules/postgresql-user/main.tf b/modules/aurora-postgres-resources/modules/postgresql-user/main.tf index e4db84910..0cb4c2b95 100644 --- a/modules/aurora-postgres-resources/modules/postgresql-user/main.tf +++ b/modules/aurora-postgres-resources/modules/postgresql-user/main.tf @@ -1,12 +1,32 @@ locals { - db_user = length(var.db_user) > 0 ? var.db_user : var.service_name - db_password = length(var.db_password) > 0 ? 
var.db_password : join("", random_password.db_password.*.result) - db_user_key = format("%s/%s/%s", var.ssm_path_prefix, var.service_name, "db_user") - db_password_key = format("%s/%s/%s", var.ssm_path_prefix, var.service_name, "db_password") + enabled = module.this.enabled + + db_user = length(var.db_user) > 0 ? var.db_user : var.service_name + db_password = length(var.db_password) > 0 ? var.db_password : join("", random_password.db_password.*.result) + + save_password_in_ssm = local.enabled && var.save_password_in_ssm + + db_password_key = format("%s/%s/passwords/%s", var.ssm_path_prefix, var.service_name, local.db_user) + db_password_ssm = local.save_password_in_ssm ? { + name = local.db_password_key + value = local.db_password + description = "Postgres Password for DB user ${local.db_user}" + type = "SecureString" + overwrite = true + } : null + + parameter_write = local.save_password_in_ssm ? [local.db_password_ssm] : [] + + # ALL grant always shows Terraform drift: + # https://github.com/cyrilgdn/terraform-provider-postgresql/issues/32 + # To workaround, expand what an ALL grant means for db or table + # https://github.com/cyrilgdn/terraform-provider-postgresql/blob/master/postgresql/helpers.go#L237-L244 + all_privileges_database = ["CREATE", "CONNECT", "TEMPORARY"] + all_privileges_schema = ["CREATE", "USAGE"] } resource "random_password" "db_password" { - count = var.enabled && length(var.db_password) == 0 ? 1 : 0 + count = local.enabled && length(var.db_password) == 0 ? 1 : 0 length = 33 special = false @@ -16,7 +36,7 @@ resource "random_password" "db_password" { } resource "postgresql_role" "default" { - count = var.enabled ? 1 : 0 + count = local.enabled ? 1 : 0 name = local.db_user password = local.db_password login = true @@ -24,30 +44,26 @@ resource "postgresql_role" "default" { # Apply the configured grants to the user resource "postgresql_grant" "default" { - count = var.enabled ? length(var.grants) : 0 + count = local.enabled ? length(var.grants) : 0 role = join("", postgresql_role.default.*.name) database = var.grants[count.index].db schema = var.grants[count.index].schema object_type = var.grants[count.index].object_type - privileges = var.grants[count.index].grant -} -resource "aws_ssm_parameter" "db_user" { - count = var.enabled ? 1 : 0 - name = local.db_user_key - value = local.db_user - description = "PostgreSQL Username (role) created by this module" - type = "String" - overwrite = true - tags = module.this.tags + # Conditionally set the privileges to either the explicit list of database privileges + # or schema privileges if this is a db grant or a schema grant respectively. + # We can determine this is a schema grant if a schema is given + privileges = contains(var.grants[count.index].grant, "ALL") ? ((length(var.grants[count.index].schema) > 0) ? local.all_privileges_schema : local.all_privileges_database) : var.grants[count.index].grant } -resource "aws_ssm_parameter" "db_password" { - count = var.enabled ? 
1 : 0 - name = local.db_password_key - value = local.db_password - description = "PostgreSQL Password for the PostreSQL User (role) created by this module" - type = "SecureString" - overwrite = true - tags = module.this.tags +module "parameter_store_write" { + source = "cloudposse/ssm-parameter-store/aws" + version = "0.10.0" + + # kms_arn will only be used for SecureString parameters + kms_arn = var.kms_key_id # not necessarily ARN β€” alias works too + + parameter_write = local.parameter_write + + context = module.this.context } diff --git a/modules/aurora-postgres-resources/modules/postgresql-user/outputs.tf b/modules/aurora-postgres-resources/modules/postgresql-user/outputs.tf index 642e9268c..10bcebbce 100644 --- a/modules/aurora-postgres-resources/modules/postgresql-user/outputs.tf +++ b/modules/aurora-postgres-resources/modules/postgresql-user/outputs.tf @@ -8,11 +8,6 @@ output "db_user" { description = "DB user name" } -output "db_user_ssm_key" { - value = local.db_user_key - description = "SSM key under which user name is stored" -} - output "db_user_password" { value = local.db_password description = "DB user password" diff --git a/modules/aurora-postgres-resources/modules/postgresql-user/variables.tf b/modules/aurora-postgres-resources/modules/postgresql-user/variables.tf index 811b477a2..2cd745796 100644 --- a/modules/aurora-postgres-resources/modules/postgresql-user/variables.tf +++ b/modules/aurora-postgres-resources/modules/postgresql-user/variables.tf @@ -19,13 +19,13 @@ variable "grants" { type = list(object({ grant : list(string) db : string - schema : string + schema : optional(string, "") object_type : string })) description = <<-EOT - List of { grant: [, , ...], db: "db", schema: null, object_type: "database"}. + List of { grant: [, , ...], db: "db", schema: "", object_type: "database"}. 
EOT - default = [{ grant : ["ALL"], db : "*", schema : null, object_type : "database" }] + default = [{ grant : ["ALL"], db : "*", schema : "", object_type : "database" }] } variable "ssm_path_prefix" { @@ -33,3 +33,15 @@ variable "ssm_path_prefix" { default = "aurora-postgres" description = "SSM path prefix (without leading or trailing slash)" } + +variable "save_password_in_ssm" { + type = bool + default = true + description = "If true, DB user's password will be stored in SSM" +} + +variable "kms_key_id" { + type = string + default = "alias/aws/rds" + description = "KMS key ID, ARN, or alias to use for encrypting the database" +} diff --git a/modules/aurora-postgres-resources/modules/postgresql-user/versions.tf b/modules/aurora-postgres-resources/modules/postgresql-user/versions.tf index 37e88dc0a..6b2f61ae6 100644 --- a/modules/aurora-postgres-resources/modules/postgresql-user/versions.tf +++ b/modules/aurora-postgres-resources/modules/postgresql-user/versions.tf @@ -1,18 +1,18 @@ terraform { - required_version = ">= 1.0.0" + required_version = ">= 1.3.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.0" - } - postgresql = { - source = "cyrilgdn/postgresql" - version = ">= 1.11.2" + version = ">= 4.9.0" } random = { source = "hashicorp/random" - version = ">= 3.0" + version = ">= 2.3" + } + postgresql = { + source = "cyrilgdn/postgresql" + version = ">= 1.17.1" } } } diff --git a/modules/aurora-postgres-resources/outputs.tf b/modules/aurora-postgres-resources/outputs.tf index 486fbbb6a..c2a4080b1 100644 --- a/modules/aurora-postgres-resources/outputs.tf +++ b/modules/aurora-postgres-resources/outputs.tf @@ -3,12 +3,17 @@ output "additional_users" { description = "Additional users" } -output "read_only_users" { - value = local.enabled ? local.sanitized_ro_users : null - description = "Read-only users" -} - output "additional_databases" { value = local.enabled ? values(postgresql_database.additional)[*].name : null description = "Additional databases" } + +output "additional_schemas" { + value = local.enabled ? values(postgresql_schema.additional)[*].name : null + description = "Additional schemas" +} + +output "additional_grants" { + value = keys(module.additional_grants) + description = "Additional grants" +} diff --git a/modules/aurora-postgres-resources/provider-postgres.tf b/modules/aurora-postgres-resources/provider-postgres.tf new file mode 100644 index 000000000..f07512af9 --- /dev/null +++ b/modules/aurora-postgres-resources/provider-postgres.tf @@ -0,0 +1,22 @@ +locals { + cluster_endpoint = module.aurora_postgres.outputs.config_map.endpoint + admin_user = module.aurora_postgres.outputs.config_map.username + admin_password_key = module.aurora_postgres.outputs.config_map.password_ssm_key + + admin_password = local.enabled ? (length(var.admin_password) > 0 ? var.admin_password : data.aws_ssm_parameter.admin_password[0].value) : "" +} + +data "aws_ssm_parameter" "admin_password" { + count = local.enabled && !(length(var.admin_password) > 0) ? 
1 : 0 + + name = local.admin_password_key + + with_decryption = true +} + +provider "postgresql" { + host = local.cluster_endpoint + username = local.admin_user + password = local.admin_password + superuser = false +} diff --git a/modules/aurora-postgres-resources/providers.tf b/modules/aurora-postgres-resources/providers.tf index 1ad8d232e..ef923e10a 100644 --- a/modules/aurora-postgres-resources/providers.tf +++ b/modules/aurora-postgres-resources/providers.tf @@ -1,11 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -14,22 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} - -provider "postgresql" { - host = local.cluster_endpoint - username = local.admin_user - password = local.admin_password - superuser = false -} diff --git a/modules/aurora-postgres-resources/read-only-user.tf b/modules/aurora-postgres-resources/read-only-user.tf deleted file mode 100644 index 8feb2a691..000000000 --- a/modules/aurora-postgres-resources/read-only-user.tf +++ /dev/null @@ -1,128 +0,0 @@ -locals { - cluster_ro_user = "cluster_ro" - - all_databases = local.enabled ? toset(compact(concat([local.database_name], tolist(var.additional_databases)))) : [] - - all_db_ro_grants = { for db in local.all_databases : db => [ - { - grant : ["CONNECT"] - db : db - schema : null - object_type : "database" - }, - { - grant : ["USAGE"] - db : db - schema : "public" - object_type : "schema" - }, - { - grant : ["SELECT"] - db : db - schema : "public" - object_type : "table" - }, - ] } - - # Need a placeholder for the derived admin_user so that we can use users_map in for_each - admin_user_placeholder = "+ADMIN_USER+" - - user_dbs = merge({ for service, v in var.additional_users : v.db_user => distinct([for g in v.grants : g.db if g.object_type == "database"]) }, - { (local.admin_user_placeholder) = local.all_databases }) - - users_map = merge(flatten([for u, dbs in local.user_dbs : { for db in dbs : "${db}_${u}" => { - user = u - db = db - } - if local.enabled - }])...) - - # all_users_map = merge(local.users_map, { - # for db in local.all_databases : "${local.cluster_ro_user}_${db}" => { - # user = local.cluster_ro_user - # db = db - # } - # }) - - read_only_users = local.enabled ? 
merge(module.read_only_db_users, - { cluster = module.read_only_cluster_user[0] }) : {} - - sanitized_ro_users = { for k, v in local.read_only_users : k => { for kk, vv in v : kk => vv if kk != "db_user_password" } } -} - -module "read_only_db_users" { - source = "./modules/postgresql-user" - - for_each = local.all_db_ro_grants - - enabled = local.enabled - - service_name = each.key - db_user = "${each.key}_ro" - db_password = "" - ssm_path_prefix = join("/", compact([local.ssm_path_prefix, local.cluster_name, "read-only"])) - - grants = each.value - - context = module.this.context - - depends_on = [ - postgresql_database.additional - ] -} - -module "read_only_cluster_user" { - source = "./modules/postgresql-user" - - count = local.enabled ? 1 : 0 - - enabled = local.enabled - service_name = "cluster" - db_user = local.cluster_ro_user - db_password = "" - ssm_path_prefix = join("/", compact([local.ssm_path_prefix, local.cluster_name, "read-only"])) - - grants = flatten(values(local.all_db_ro_grants)) - - context = module.this.context - - depends_on = [ - postgresql_database.additional - ] -} - -resource "postgresql_default_privileges" "read_only_tables_users" { - for_each = local.users_map - - role = "${each.value.db}_ro" - database = each.value.db - schema = "public" - - owner = each.value.user == local.admin_user_placeholder ? local.admin_user : each.value.user - object_type = "table" - privileges = ["SELECT"] - - depends_on = [ - module.read_only_db_users, - module.read_only_cluster_user, - postgresql_database.additional - ] -} - -resource "postgresql_default_privileges" "read_only_tables_cluster" { - for_each = local.users_map - - role = local.cluster_ro_user - database = each.value.db - schema = "public" - - owner = each.value.user == local.admin_user_placeholder ? local.admin_user : each.value.user - object_type = "table" - privileges = ["SELECT"] - - depends_on = [ - module.read_only_db_users, - module.read_only_cluster_user, - postgresql_database.additional - ] -} diff --git a/modules/aurora-postgres-resources/remote-state.tf b/modules/aurora-postgres-resources/remote-state.tf index c85882274..f4cebbc63 100644 --- a/modules/aurora-postgres-resources/remote-state.tf +++ b/modules/aurora-postgres-resources/remote-state.tf @@ -1,6 +1,6 @@ module "aurora_postgres" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.3" + version = "1.5.0" component = var.aurora_postgres_component_name diff --git a/modules/aurora-postgres-resources/variables.tf b/modules/aurora-postgres-resources/variables.tf index 56193b877..f46902939 100644 --- a/modules/aurora-postgres-resources/variables.tf +++ b/modules/aurora-postgres-resources/variables.tf @@ -6,12 +6,51 @@ variable "region" { variable "aurora_postgres_component_name" { type = string description = "Aurora Postgres component name to read the remote state from" + default = "aurora-postgres" +} + +variable "read_passwords_from_ssm" { + type = bool + default = true + description = "When `true`, fetch user passwords from SSM" +} + +variable "ssm_path_prefix" { + type = string + default = "aurora-postgres" + description = "SSM path prefix" +} + +variable "ssm_password_source" { + type = string + default = "" + description = <<-EOT + If var.read_passwords_from_ssm is true, DB user passwords will be retrieved from SSM using `var.ssm_password_source` and the database username. If this value is not set, a default path will be created using the SSM path prefix and ID of the associated Aurora Cluster. 
+ EOT +} + +variable "admin_password" { + type = string + description = "postgresql password for the admin user" + default = "" +} + +variable "db_name" { + type = string + description = "Database name (default is not to create a database)" + default = "" +} + +variable "cluster_enabled" { + type = string + default = true + description = "Set to `false` to prevent the module from creating any resources" } variable "additional_databases" { type = set(string) default = [] - description = "Define additional databases to create." + description = "Additional databases to be created with the cluster" } variable "additional_users" { @@ -27,5 +66,35 @@ variable "additional_users" { })) })) default = {} - description = "Define additional users to create." + description = <<-EOT + Create additional database user for a service, specifying username, grants, and optional password. + If no password is specified, one will be generated. Username and password will be stored in + SSM parameter store under the service's key. + EOT +} + +variable "additional_grants" { + # map key is user name + type = map(list(object({ + grant : list(string) + db : string + }))) + default = {} + description = <<-EOT + Create additional database user with specified grants. + If `var.ssm_password_source` is set, passwords will be retrieved from SSM parameter store, + otherwise, passwords will be generated and stored in SSM parameter store under the service's key. + EOT +} + +variable "additional_schemas" { + # Map key is the name of the schema + type = map(object({ + database : string + })) + default = {} + description = <<-EOT + Create additional schemas for a given database. + If no database is given, the schema will use the database used by the provider configuration + EOT } diff --git a/modules/aurora-postgres-resources/versions.tf b/modules/aurora-postgres-resources/versions.tf index 30c0338b1..1911bc9af 100644 --- a/modules/aurora-postgres-resources/versions.tf +++ b/modules/aurora-postgres-resources/versions.tf @@ -1,14 +1,14 @@ terraform { - required_version = ">= 1.0.0" + required_version = ">= 1.3.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.0" + version = ">= 4.9.0" } postgresql = { source = "cyrilgdn/postgresql" - version = ">= 1.11.2" + version = ">= 1.17.1" } } } diff --git a/modules/aurora-postgres/README.md b/modules/aurora-postgres/README.md index 8d9ff6020..fb524bfee 100644 --- a/modules/aurora-postgres/README.md +++ b/modules/aurora-postgres/README.md @@ -1,11 +1,14 @@ -# Component: `aurora-postgres` +--- +tags: + - component/aurora-postgres + - layer/data + - provider/aws +--- -This component is responsible for provisioning Aurora Postgres RDS clusters. -It seeds relevant database information (hostnames, username, password, etc.) into AWS SSM Parameter Store. +# Component: `aurora-postgres` -**NOTE**: Creating additional users (including read-only users) and databases -requires Spacelift, since that action to be done via the postgresql provider, -and by default only the automation account is whitelisted by the Aurora cluster. +This component is responsible for provisioning Aurora Postgres RDS clusters. It seeds relevant database information +(hostnames, username, password, etc.) into AWS SSM Parameter Store. ## Usage @@ -13,115 +16,290 @@ and by default only the automation account is whitelisted by the Aurora cluster. Here's an example for how to use this component. 
-`stacks/catalog/aurora/defaults.yaml` file (base component for all Aurora Postgres clusters with default settings): +`stacks/catalog/aurora-postgres/defaults.yaml` file (base component for all Aurora Postgres clusters with default +settings): ```yaml components: terraform: - aurora-postgres: + aurora-postgres/defaults: + metadata: + type: abstract vars: - instance_type: db.r5.large - cluster_size: 1 + enabled: true + name: aurora-postgres + tags: + Team: sre + Service: aurora-postgres + cluster_name: shared + deletion_protection: false + storage_encrypted: true engine: aurora-postgresql - cluster_family: aurora-postgresql12 - engine_version: 12.4 + + # Provisioned configuration engine_mode: provisioned - iam_database_authentication_enabled: false - deletion_protection: true - storage_encrypted: true - database_name: "" - admin_user: "" - admin_password: "" - read_only_users_enabled: false + engine_version: "15.3" + cluster_family: aurora-postgresql15 + # 1 writer, 1 reader + cluster_size: 2 + # https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html + instance_type: db.t3.medium + + admin_user: postgres + admin_password: "" # generate random password + database_name: postgres + database_port: 5432 + skip_final_snapshot: false + # Enhanced Monitoring + # A boolean flag to enable/disable the creation of the enhanced monitoring IAM role. + # If set to false, the module will not create a new role and will use rds_monitoring_role_arn for enhanced monitoring + enhanced_monitoring_role_enabled: true + # The interval, in seconds, between points when enhanced monitoring metrics are collected for the DB instance. + # To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid Values: 0, 1, 5, 10, 15, 30, 60 + rds_monitoring_interval: 15 + # Allow ingress from the following accounts + # If any of tenant, stage, or environment aren't given, this will be taken + allow_ingress_from_vpc_accounts: + - tenant: core + stage: auto ``` -Example (not actual) -`stacks/uw2-dev.yaml` file (override the default settings for the cluster in the `dev` account, create an additional database and user): + +Example (not actual): + +`stacks/uw2-dev.yaml` file (override the default settings for the cluster in the `dev` account, create an additional +database and user): ```yaml import: - - catalog/aurora/defaults + - catalog/aurora-postgres/defaults + +components: + terraform: + aurora-postgres: + metadata: + component: aurora-postgres + inherits: + - aurora-postgres/defaults + vars: + enabled: true +``` + +### Finding Aurora Engine Version + +Use the following to query the AWS API by `engine-mode`. Both provisioned and Serverless v2 use the `privisoned` engine +mode, whereas only Serverless v1 uses the `serverless` engine mode. + +```bash +aws rds describe-db-engine-versions \ + --engine aurora-postgresql \ + --query 'DBEngineVersions[].EngineVersion' \ + --filters 'Name=engine-mode,Values=serverless' +``` + +Use the following to query AWS API by `db-instance-class`. Use this query to find supported versions for a specific +instance class, such as `db.serverless` with Serverless v2. + +```bash +aws rds describe-orderable-db-instance-options \ + --engine aurora-postgresql \ + --db-instance-class db.serverless \ + --query 'OrderableDBInstanceOptions[].[EngineVersion]' +``` + +Once a version has been selected, use the following to find the cluster family. 
+ +```bash +aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[]" | \ +jq '.[] | select(.EngineVersion == "15.3") | + { Engine: .Engine, EngineVersion: .EngineVersion, DBParameterGroupFamily: .DBParameterGroupFamily }' +``` + +## Examples + +Generally there are three different engine configurations for Aurora: provisioned, Serverless v1, and Serverless v2. + +### Provisioned Aurora Postgres +[See the default usage example above](#usage) + +### Serverless v1 Aurora Postgres + +Serverless v1 requires `engine-mode` set to `serverless` uses `scaling_configuration` to configure scaling options. + +For valid values, see +[ModifyCurrentDBClusterCapacity](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_ModifyCurrentDBClusterCapacity.html). + +```yaml components: terraform: aurora-postgres: vars: - instance_type: db.r5.large - cluster_size: 1 - cluster_name: main - database_name: main - additional_databases: - - example_db - additional_users: - example_service: - db_user: example_user - db_password: "" - grants: - - grant: [ "ALL" ] - db: example_db - object_type: database - schema: null + enabled: true + name: aurora-postgres + eks_component_names: + - eks/cluster + allow_ingress_from_vpc_accounts: + # Allows Spacelift + - tenant: core + stage: auto + environment: use2 + # Allows VPN + - tenant: core + stage: network + environment: use2 + cluster_name: shared + engine: aurora-postgresql + + # Serverless v1 configuration + engine_mode: serverless + instance_type: "" # serverless engine_mode ignores `var.instance_type` + engine_version: "13.9" # Latest supported version as of 08/28/2023 + cluster_family: aurora-postgresql13 + cluster_size: 0 # serverless + scaling_configuration: + - auto_pause: true + max_capacity: 5 + min_capacity: 2 + seconds_until_auto_pause: 300 + timeout_action: null + + admin_user: postgres + admin_password: "" # generate random password + database_name: postgres + database_port: 5432 + storage_encrypted: true + deletion_protection: true + skip_final_snapshot: false + # Creating read-only users or additional databases requires Spacelift + read_only_users_enabled: false + # Enhanced Monitoring + # A boolean flag to enable/disable the creation of the enhanced monitoring IAM role. + # If set to false, the module will not create a new role and will use rds_monitoring_role_arn for enhanced monitoring + enhanced_monitoring_role_enabled: true + enhanced_monitoring_attributes: ["monitoring"] + # The interval, in seconds, between points when enhanced monitoring metrics are collected for the DB instance. + # To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid Values: 0, 1, 5, 10, 15, 30, 60 + rds_monitoring_interval: 15 + iam_database_authentication_enabled: false + additional_users: {} ``` +### Serverless v2 Aurora Postgres + +Aurora Postgres Serverless v2 uses the `provisioned` engine mode with `db.serverless` instances. In order to configure +scaling with Serverless v2, use `var.serverlessv2_scaling_configuration`. + +For more on valid scaling configurations, see +[Performance and scaling for Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.setting-capacity.html). 
+ +```yaml +components: + terraform: + aurora-postgres: + vars: + enabled: true + name: aurora-postgres + eks_component_names: + - eks/cluster + allow_ingress_from_vpc_accounts: + # Allows Spacelift + - tenant: core + stage: auto + environment: use2 + # Allows VPN + - tenant: core + stage: network + environment: use2 + cluster_name: shared + engine: aurora-postgresql + + # Serverless v2 configuration + engine_mode: provisioned + instance_type: "db.serverless" + engine_version: "15.3" + cluster_family: aurora-postgresql15 + cluster_size: 2 + serverlessv2_scaling_configuration: + min_capacity: 2 + max_capacity: 64 + + admin_user: postgres + admin_password: "" # generate random password + database_name: postgres + database_port: 5432 + storage_encrypted: true + deletion_protection: true + skip_final_snapshot: false + # Creating read-only users or additional databases requires Spacelift + read_only_users_enabled: false + # Enhanced Monitoring + # A boolean flag to enable/disable the creation of the enhanced monitoring IAM role. + # If set to false, the module will not create a new role and will use rds_monitoring_role_arn for enhanced monitoring + enhanced_monitoring_role_enabled: true + enhanced_monitoring_attributes: ["monitoring"] + # The interval, in seconds, between points when enhanced monitoring metrics are collected for the DB instance. + # To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid Values: 0, 1, 5, 10, 15, 30, 60 + rds_monitoring_interval: 15 + iam_database_authentication_enabled: false + additional_users: {} +``` + + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | >= 3.0 | -| [postgresql](#requirement\_postgresql) | >= 1.14.0 | +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | +| [postgresql](#requirement\_postgresql) | >= 1.17.1 | | [random](#requirement\_random) | >= 2.3 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.0 | -| [postgresql](#provider\_postgresql) | >= 1.14.0 | +| [aws](#provider\_aws) | >= 4.9.0 | | [random](#provider\_random) | >= 2.3 | ## Modules | Name | Source | Version | |------|--------|---------| -| [additional\_users](#module\_additional\_users) | ./modules/postgresql-user | n/a | -| [aurora\_postgres\_cluster](#module\_aurora\_postgres\_cluster) | cloudposse/rds-cluster/aws | 0.47.2 | +| [aurora\_postgres\_cluster](#module\_aurora\_postgres\_cluster) | cloudposse/rds-cluster/aws | 1.3.2 | | [cluster](#module\_cluster) | cloudposse/label/null | 0.25.0 | -| [dns\_gbl\_delegated](#module\_dns\_gbl\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.0 | -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.0 | +| [dns\_gbl\_delegated](#module\_dns\_gbl\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [kms\_key\_rds](#module\_kms\_key\_rds) | cloudposse/kms-key/aws | 0.12.0 | -| [parameter\_store\_write](#module\_parameter\_store\_write) | cloudposse/ssm-parameter-store/aws | 0.8.3 | -| [read\_only\_cluster\_user](#module\_read\_only\_cluster\_user) | ./modules/postgresql-user | n/a | -| [read\_only\_db\_users](#module\_read\_only\_db\_users) | ./modules/postgresql-user | n/a | +| 
[kms\_key\_rds](#module\_kms\_key\_rds) | cloudposse/kms-key/aws | 0.12.1 | +| [parameter\_store\_write](#module\_parameter\_store\_write) | cloudposse/ssm-parameter-store/aws | 0.11.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.0 | -| [vpc\_spacelift](#module\_vpc\_spacelift) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [vpc\_ingress](#module\_vpc\_ingress) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources | Name | Type | |------|------| -| [postgresql_database.additional](https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs/resources/database) | resource | -| [postgresql_default_privileges.read_only_tables_cluster](https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs/resources/default_privileges) | resource | -| [postgresql_default_privileges.read_only_tables_users](https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs/resources/default_privileges) | resource | | [random_password.admin_password](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/password) | resource | | [random_pet.admin_user](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/pet) | resource | | [random_pet.database_name](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/pet) | resource | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | | [aws_iam_policy_document.kms_key_rds](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | +| [aws_security_groups.allowed](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/security_groups) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [additional\_databases](#input\_additional\_databases) | n/a | `set(string)` | `[]` | no | | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | -| [additional\_users](#input\_additional\_users) | n/a |
map(object({
db_user : string
db_password : string
grants : list(object({
grant : list(string)
db : string
schema : string
object_type : string
}))
}))
| `{}` | no | | [admin\_password](#input\_admin\_password) | Postgres password for the admin user | `string` | `""` | no | | [admin\_user](#input\_admin\_user) | Postgres admin user name | `string` | `""` | no | +| [allow\_ingress\_from\_vpc\_accounts](#input\_allow\_ingress\_from\_vpc\_accounts) | List of account contexts to pull VPC ingress CIDR and add to cluster security group.
e.g.
{
environment = "ue2",
stage = "auto",
tenant = "core"
}

Defaults to the "vpc" component in the given account |
list(object({
vpc = optional(string, "vpc")
environment = optional(string)
stage = optional(string)
tenant = optional(string)
}))
| `[]` | no | +| [allow\_major\_version\_upgrade](#input\_allow\_major\_version\_upgrade) | Enable to allow major engine version upgrades when changing engine versions. Defaults to false. | `bool` | `false` | no | | [allowed\_cidr\_blocks](#input\_allowed\_cidr\_blocks) | List of CIDRs allowed to access the database (in addition to security groups and subnets) | `list(string)` | `[]` | no | +| [allowed\_security\_group\_names](#input\_allowed\_security\_group\_names) | List of security group names (tags) that should be allowed access to the database | `list(string)` | `[]` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | | [autoscaling\_enabled](#input\_autoscaling\_enabled) | Whether to enable cluster autoscaling | `bool` | `false` | no | | [autoscaling\_max\_capacity](#input\_autoscaling\_max\_capacity) | Maximum number of instances to be maintained by the autoscaler | `number` | `5` | no | @@ -131,9 +309,12 @@ components: | [autoscaling\_scale\_out\_cooldown](#input\_autoscaling\_scale\_out\_cooldown) | The amount of time, in seconds, after a scaling activity completes and before the next scaling up activity can start. Default is 300s | `number` | `300` | no | | [autoscaling\_target\_metrics](#input\_autoscaling\_target\_metrics) | The metrics type to use. If this value isn't provided the default is CPU utilization | `string` | `"RDSReaderAverageCPUUtilization"` | no | | [autoscaling\_target\_value](#input\_autoscaling\_target\_value) | The target value to scale with respect to target metrics | `number` | `75` | no | +| [backup\_window](#input\_backup\_window) | Daily time range during which the backups happen, UTC | `string` | `"07:00-09:00"` | no | +| [ca\_cert\_identifier](#input\_ca\_cert\_identifier) | The identifier of the CA certificate for the DB instance | `string` | `null` | no | | [cluster\_dns\_name\_part](#input\_cluster\_dns\_name\_part) | Part of DNS name added to module and cluster name for DNS for cluster endpoint | `string` | `"writer"` | no | | [cluster\_family](#input\_cluster\_family) | Family of the DB parameter group. Valid values for Aurora PostgreSQL: `aurora-postgresql9.6`, `aurora-postgresql10`, `aurora-postgresql11`, `aurora-postgresql12` | `string` | `"aurora-postgresql13"` | no | | [cluster\_name](#input\_cluster\_name) | Short name for this cluster | `string` | n/a | yes | +| [cluster\_parameters](#input\_cluster\_parameters) | List of DB cluster parameters to apply |
list(object({
apply_method = string
name = string
value = string
}))
| `[]` | no | | [cluster\_size](#input\_cluster\_size) | Postgres cluster size | `number` | n/a | yes | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [database\_name](#input\_database\_name) | Name for an automatically created database on cluster creation. An empty name will generate a db name. | `string` | `""` | no | @@ -142,19 +323,20 @@ components: | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [dns\_gbl\_delegated\_environment\_name](#input\_dns\_gbl\_delegated\_environment\_name) | The name of the environment where global `dns_delegated` is provisioned | `string` | `"gbl"` | no | +| [eks\_component\_names](#input\_eks\_component\_names) | The names of the eks components | `set(string)` |
[
"eks/cluster"
]
| no | +| [eks\_security\_group\_enabled](#input\_eks\_security\_group\_enabled) | Use the eks default security group | `bool` | `false` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [enabled\_cloudwatch\_logs\_exports](#input\_enabled\_cloudwatch\_logs\_exports) | List of log types to export to cloudwatch. The following log types are supported: audit, error, general, slowquery | `list(string)` | `[]` | no | | [engine](#input\_engine) | Name of the database engine to be used for the DB cluster | `string` | `"postgresql"` | no | | [engine\_mode](#input\_engine\_mode) | The database engine mode. Valid values: `global`, `multimaster`, `parallelquery`, `provisioned`, `serverless` | `string` | n/a | yes | | [engine\_version](#input\_engine\_version) | Engine version of the Aurora global database | `string` | `"13.4"` | no | +| [enhanced\_monitoring\_attributes](#input\_enhanced\_monitoring\_attributes) | Attributes used to format the Enhanced Monitoring IAM role. If this role hits IAM role length restrictions (max 64 characters), consider shortening these strings. | `list(string)` |
[
"enhanced-monitoring"
]
| no | | [enhanced\_monitoring\_role\_enabled](#input\_enhanced\_monitoring\_role\_enabled) | A boolean flag to enable/disable the creation of the enhanced monitoring IAM role. If set to `false`, the module will not create a new role and will use `rds_monitoring_role_arn` for enhanced monitoring | `bool` | `true` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [iam\_database\_authentication\_enabled](#input\_iam\_database\_authentication\_enabled) | Specifies whether mappings of AWS Identity and Access Management (IAM) accounts to database accounts are enabled | `bool` | `false` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [instance\_type](#input\_instance\_type) | EC2 instance type for Postgres cluster | `string` | n/a | yes | -| [kms\_alias\_name\_ssm](#input\_kms\_alias\_name\_ssm) | KMS alias name for SSM | `string` | `"alias/aws/ssm"` | no | +| [intra\_security\_group\_traffic\_enabled](#input\_intra\_security\_group\_traffic\_enabled) | Whether to allow traffic between resources inside the database's security group. | `bool` | `false` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -165,10 +347,12 @@ components: | [performance\_insights\_enabled](#input\_performance\_insights\_enabled) | Whether to enable Performance Insights | `bool` | `false` | no | | [publicly\_accessible](#input\_publicly\_accessible) | Set true to make this database accessible from the public internet | `bool` | `false` | no | | [rds\_monitoring\_interval](#input\_rds\_monitoring\_interval) | The interval, in seconds, between points when enhanced monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid Values: 0, 1, 5, 10, 15, 30, 60 | `number` | `60` | no | -| [read\_only\_users\_enabled](#input\_read\_only\_users\_enabled) | Set `true` to automatically create read-only users for every database | `bool` | `false` | no | | [reader\_dns\_name\_part](#input\_reader\_dns\_name\_part) | Part of DNS name added to module and cluster name for DNS for cluster reader | `string` | `"reader"` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [retention\_period](#input\_retention\_period) | Number of days to retain backups for | `number` | `5` | no | +| [scaling\_configuration](#input\_scaling\_configuration) | List of nested attributes with scaling properties. Only valid when `engine_mode` is set to `serverless`. This is required for Serverless v1 |
list(object({
auto_pause = bool
max_capacity = number
min_capacity = number
seconds_until_auto_pause = number
timeout_action = string
}))
| `[]` | no | +| [serverlessv2\_scaling\_configuration](#input\_serverlessv2\_scaling\_configuration) | Nested attribute with scaling properties for ServerlessV2. Only valid when `engine_mode` is set to `provisioned.` This is required for Serverless v2 |
object({
min_capacity = number
max_capacity = number
})
| `null` | no | | [skip\_final\_snapshot](#input\_skip\_final\_snapshot) | Normally AWS makes a snapshot of the database before deleting it. Set this to `true` in order to skip this.
NOTE: The final snapshot has a name derived from the cluster name. If you delete a cluster, get a final snapshot,
then create a cluster of the same name, its final snapshot will fail with a name collision unless you delete
the previous final snapshot first. | `bool` | `false` | no | | [snapshot\_identifier](#input\_snapshot\_identifier) | Specifies whether or not to create this cluster from a snapshot | `string` | `null` | no | | [ssm\_path\_prefix](#input\_ssm\_path\_prefix) | Top level SSM path prefix (without leading or trailing slash) | `string` | `"aurora-postgres"` | no | @@ -176,26 +360,27 @@ components: | [storage\_encrypted](#input\_storage\_encrypted) | Specifies whether the DB cluster is encrypted | `bool` | `true` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | -| [vpc\_spacelift\_stage\_name](#input\_vpc\_spacelift\_stage\_name) | The name of the stage of the `vpc` component where `spacelift-worker-pool` is provisioned | `string` | `"auto"` | no | +| [vpc\_component\_name](#input\_vpc\_component\_name) | The name of the VPC component | `string` | `"vpc"` | no | ## Outputs | Name | Description | |------|-------------| -| [additional\_users](#output\_additional\_users) | Information about additional DB users created by request | | [admin\_username](#output\_admin\_username) | Postgres admin username | +| [allowed\_security\_groups](#output\_allowed\_security\_groups) | The resulting list of security group IDs that are allowed to connect to the Aurora Postgres cluster. | | [cluster\_identifier](#output\_cluster\_identifier) | Postgres cluster identifier | | [config\_map](#output\_config\_map) | Map containing information pertinent to a PostgreSQL client configuration. | | [database\_name](#output\_database\_name) | Postgres database name | +| [kms\_key\_arn](#output\_kms\_key\_arn) | KMS key ARN for Aurora Postgres | | [master\_hostname](#output\_master\_hostname) | Postgres master hostname | -| [read\_only\_users](#output\_read\_only\_users) | List of read only users. | | [replicas\_hostname](#output\_replicas\_hostname) | Postgres replicas hostname | | [ssm\_key\_paths](#output\_ssm\_key\_paths) | Names (key paths) of all SSM parameters stored for this cluster | - + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/aurora-postgres) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/aurora-postgres) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/aurora-postgres/cluster-regional.tf b/modules/aurora-postgres/cluster-regional.tf index 4a3a6ba91..ef0c923dd 100644 --- a/modules/aurora-postgres/cluster-regional.tf +++ b/modules/aurora-postgres/cluster-regional.tf @@ -2,15 +2,10 @@ # This means that explicit provider blocks appear only in the root module, and downstream modules can simply # declare resources for that provider and have them automatically associated with the root provider configurations -locals { - additional_users = local.enabled ? 
var.additional_users : {} - sanitized_additional_users = { for k, v in module.additional_users : k => { for kk, vv in v : kk => vv if kk != "db_user_password" } } -} - # https://www.terraform.io/docs/providers/aws/r/rds_cluster.html module "aurora_postgres_cluster" { source = "cloudposse/rds-cluster/aws" - version = "0.47.2" + version = "1.3.2" cluster_type = "regional" engine = var.engine @@ -22,38 +17,46 @@ module "aurora_postgres_cluster" { admin_user = local.admin_user admin_password = local.admin_password - db_name = local.database_name - publicly_accessible = var.publicly_accessible - db_port = var.database_port - vpc_id = local.vpc_id - subnets = local.private_subnet_ids - zone_id = local.zone_id - cluster_dns_name = local.cluster_dns_name - reader_dns_name = local.reader_dns_name - security_groups = local.allowed_security_groups - allowed_cidr_blocks = concat(var.allowed_cidr_blocks, [module.vpc_spacelift.outputs.vpc_cidr]) - iam_database_authentication_enabled = var.iam_database_authentication_enabled - storage_encrypted = var.storage_encrypted - kms_key_arn = var.storage_encrypted ? module.kms_key_rds.key_arn : null - performance_insights_kms_key_id = var.performance_insights_enabled ? module.kms_key_rds.key_arn : null - maintenance_window = var.maintenance_window - enabled_cloudwatch_logs_exports = var.enabled_cloudwatch_logs_exports - enhanced_monitoring_role_enabled = var.enhanced_monitoring_role_enabled - performance_insights_enabled = var.performance_insights_enabled - rds_monitoring_interval = var.rds_monitoring_interval - autoscaling_enabled = var.autoscaling_enabled - autoscaling_policy_type = var.autoscaling_policy_type - autoscaling_target_metrics = var.autoscaling_target_metrics - autoscaling_target_value = var.autoscaling_target_value - autoscaling_scale_in_cooldown = var.autoscaling_scale_in_cooldown - autoscaling_scale_out_cooldown = var.autoscaling_scale_out_cooldown - autoscaling_min_capacity = var.autoscaling_min_capacity - autoscaling_max_capacity = var.autoscaling_max_capacity - skip_final_snapshot = var.skip_final_snapshot - deletion_protection = var.deletion_protection - snapshot_identifier = var.snapshot_identifier + db_name = local.database_name + publicly_accessible = var.publicly_accessible + db_port = var.database_port + vpc_id = local.vpc_id + subnets = local.private_subnet_ids + zone_id = local.zone_id + cluster_dns_name = local.cluster_dns_name + reader_dns_name = local.reader_dns_name + security_groups = local.allowed_security_groups + intra_security_group_traffic_enabled = var.intra_security_group_traffic_enabled + allowed_cidr_blocks = local.allowed_cidr_blocks + iam_database_authentication_enabled = var.iam_database_authentication_enabled + storage_encrypted = var.storage_encrypted + kms_key_arn = var.storage_encrypted ? module.kms_key_rds.key_arn : null + performance_insights_kms_key_id = var.performance_insights_enabled ? 
module.kms_key_rds.key_arn : null + maintenance_window = var.maintenance_window + enabled_cloudwatch_logs_exports = var.enabled_cloudwatch_logs_exports + enhanced_monitoring_role_enabled = var.enhanced_monitoring_role_enabled + enhanced_monitoring_attributes = var.enhanced_monitoring_attributes + performance_insights_enabled = var.performance_insights_enabled + rds_monitoring_interval = var.rds_monitoring_interval + autoscaling_enabled = var.autoscaling_enabled + autoscaling_policy_type = var.autoscaling_policy_type + autoscaling_target_metrics = var.autoscaling_target_metrics + autoscaling_target_value = var.autoscaling_target_value + autoscaling_scale_in_cooldown = var.autoscaling_scale_in_cooldown + autoscaling_scale_out_cooldown = var.autoscaling_scale_out_cooldown + autoscaling_min_capacity = var.autoscaling_min_capacity + autoscaling_max_capacity = var.autoscaling_max_capacity + scaling_configuration = var.scaling_configuration + serverlessv2_scaling_configuration = var.serverlessv2_scaling_configuration + skip_final_snapshot = var.skip_final_snapshot + deletion_protection = var.deletion_protection + snapshot_identifier = var.snapshot_identifier + allow_major_version_upgrade = var.allow_major_version_upgrade + ca_cert_identifier = var.ca_cert_identifier + retention_period = var.retention_period + backup_window = var.backup_window - cluster_parameters = [ + cluster_parameters = concat([ { apply_method = "immediate" name = "log_statement" @@ -64,26 +67,7 @@ module "aurora_postgres_cluster" { name = "log_min_duration_statement" value = "0" } - ] + ], var.cluster_parameters) context = module.cluster.context } - -resource "postgresql_database" "additional" { - for_each = module.this.enabled ? var.additional_databases : [] - name = each.key - depends_on = [module.aurora_postgres_cluster.cluster_identifier] -} - -module "additional_users" { - for_each = local.additional_users - source = "./modules/postgresql-user" - - service_name = each.key - db_user = each.value.db_user - db_password = each.value.db_password - grants = each.value.grants - ssm_path_prefix = local.ssm_path_prefix - - depends_on = [module.aurora_postgres_cluster.cluster_identifier] -} diff --git a/modules/aurora-postgres/default.auto.tfvars b/modules/aurora-postgres/default.auto.tfvars deleted file mode 100644 index 8e0ec92c3..000000000 --- a/modules/aurora-postgres/default.auto.tfvars +++ /dev/null @@ -1,25 +0,0 @@ -name = "pg" - -enabled = false - -deletion_protection = true - -storage_encrypted = true - -engine = "aurora-postgresql" - -engine_mode = "provisioned" - -# https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Updates.20180305.html -# https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.PostgreSQL.html -# aws rds describe-db-engine-versions --engine aurora-postgresql -# If you know the engine version (example here is "12.4"), use Engine and DBParameterGroupFamily from: -# aws rds describe-db-engine-versions --query "DBEngineVersions[]" | \ -# jq '.[] | select(.EngineVersion == "12.4") | -# { Engine: .Engine, EngineVersion: .EngineVersion, DBParameterGroupFamily: .DBParameterGroupFamily }' -# For Aurora Postgres 12.4: -# engine: "postgresql" -# cluster_family: "aurora-postgresql12" -engine_version = "13.4" - -cluster_family = "aurora-postgresql13" diff --git a/modules/aurora-postgres/kms.tf b/modules/aurora-postgres/kms.tf index 08d4d40cf..74af4621d 100644 --- a/modules/aurora-postgres/kms.tf +++ b/modules/aurora-postgres/kms.tf @@ -1,6 +1,6 @@ module 
"kms_key_rds" { source = "cloudposse/kms-key/aws" - version = "0.12.0" + version = "0.12.1" description = "KMS key for Aurora Postgres" deletion_window_in_days = 10 diff --git a/modules/aurora-postgres/main.tf b/modules/aurora-postgres/main.tf index 4e42525f1..6c8818162 100644 --- a/modules/aurora-postgres/main.tf +++ b/modules/aurora-postgres/main.tf @@ -1,22 +1,40 @@ locals { enabled = module.this.enabled - vpc_id = module.vpc.outputs.vpc_id - private_subnet_ids = module.vpc.outputs.private_subnet_ids - allowed_security_groups = [module.eks.outputs.eks_cluster_managed_security_group_id] + vpc_id = module.vpc.outputs.vpc_id + private_subnet_ids = module.vpc.outputs.private_subnet_ids + + eks_security_group_enabled = local.enabled && var.eks_security_group_enabled + allowed_eks_security_groups = [ + for eks in module.eks : + eks.outputs.eks_cluster_managed_security_group_id + ] + allowed_security_groups = concat(data.aws_security_groups.allowed.ids, local.allowed_eks_security_groups) zone_id = module.dns_gbl_delegated.outputs.default_dns_zone_id - admin_user = length(var.admin_user) > 0 ? var.admin_user : join("", random_pet.admin_user.*.id) - admin_password = length(var.admin_password) > 0 ? var.admin_password : join("", random_password.admin_password.*.result) - database_name = length(var.database_name) > 0 ? var.database_name : join("", random_pet.database_name.*.id) + admin_user = length(var.admin_user) > 0 ? var.admin_user : join("", random_pet.admin_user[*].id) + admin_password = length(var.admin_password) > 0 ? var.admin_password : join("", random_password.admin_password[*].result) + database_name = length(var.database_name) > 0 ? var.database_name : join("", random_pet.database_name[*].id) cluster_dns_name_prefix = format("%v%v%v%v", module.this.name, module.this.delimiter, var.cluster_name, module.this.delimiter) cluster_dns_name = format("%v%v", local.cluster_dns_name_prefix, var.cluster_dns_name_part) reader_dns_name = format("%v%v", local.cluster_dns_name_prefix, var.reader_dns_name_part) - ssm_path_prefix = format("/%s/%s", var.ssm_path_prefix, module.cluster.id) - ssm_cluster_key_prefix = format("%s/%s", local.ssm_path_prefix, "cluster") + allowed_cidr_blocks = concat( + var.allowed_cidr_blocks, + [ + for k in keys(module.vpc_ingress) : + module.vpc_ingress[k].outputs.vpc_cidr + ] + ) +} + +data "aws_security_groups" "allowed" { + filter { + name = "tag:Name" + values = var.allowed_security_group_names + } } module "cluster" { diff --git a/modules/aurora-postgres/modules/postgresql-user/main.tf b/modules/aurora-postgres/modules/postgresql-user/main.tf deleted file mode 100644 index 74c79093f..000000000 --- a/modules/aurora-postgres/modules/postgresql-user/main.tf +++ /dev/null @@ -1,42 +0,0 @@ -locals { - db_user = length(var.db_user) > 0 ? var.db_user : var.service_name - db_password = length(var.db_password) > 0 ? var.db_password : join("", random_password.db_password.*.result) - db_password_key = format("%s/%s/%s/%s", var.ssm_path_prefix, var.service_name, local.db_user, "db_password") -} - -resource "random_password" "db_password" { - count = var.enabled && length(var.db_password) == 0 ? 1 : 0 - length = 33 - special = false - - keepers = { - db_user = local.db_user - } -} - -resource "postgresql_role" "default" { - count = var.enabled ? 1 : 0 - name = local.db_user - password = local.db_password - login = true -} - -# Apply the configured grants to the user -resource "postgresql_grant" "default" { - count = var.enabled ? 
length(var.grants) : 0 - role = join("", postgresql_role.default.*.name) - database = var.grants[count.index].db - schema = var.grants[count.index].schema - object_type = var.grants[count.index].object_type - privileges = var.grants[count.index].grant -} - -resource "aws_ssm_parameter" "db_password" { - count = var.enabled ? 1 : 0 - name = local.db_password_key - value = local.db_password - description = "PostgreSQL Password for the PostreSQL User (role) created by this module" - type = "SecureString" - overwrite = true - tags = module.this.tags -} diff --git a/modules/aurora-postgres/modules/postgresql-user/outputs.tf b/modules/aurora-postgres/modules/postgresql-user/outputs.tf deleted file mode 100644 index f21892a0f..000000000 --- a/modules/aurora-postgres/modules/postgresql-user/outputs.tf +++ /dev/null @@ -1,19 +0,0 @@ -output "notice" { - value = "Password for user ${local.db_user} is stored in SSM under ${local.db_password_key}" - description = "Note to user" -} - -output "db_user" { - value = local.db_user - description = "DB user name" -} - -output "db_user_password_ssm_key" { - value = local.db_password_key - description = "SSM key under which user password is stored" -} - -output "service_name" { - value = var.service_name - description = "Service for which this user was created" -} diff --git a/modules/aurora-postgres/modules/postgresql-user/variables.tf b/modules/aurora-postgres/modules/postgresql-user/variables.tf deleted file mode 100644 index 39426489d..000000000 --- a/modules/aurora-postgres/modules/postgresql-user/variables.tf +++ /dev/null @@ -1,35 +0,0 @@ -variable "service_name" { - type = string - description = "Name of service owning the database (used in SSM key)" -} - -variable "db_user" { - type = string - description = "PostgreSQL user name to create (default is service name)" - default = "" -} - -variable "db_password" { - type = string - description = "PostgreSQL password created user (generated if not provided)" - default = "" -} - -variable "grants" { - type = list(object({ - grant : list(string) - db : string - schema : string - object_type : string - })) - description = <<-EOT - List of { grant: [, , ...], db: "db", schema: null, object_type: "database"}. - EOT - default = [{ grant : ["ALL"], db : "*", schema : null, object_type : "database" }] -} - -variable "ssm_path_prefix" { - type = string - default = "aurora-postgres" - description = "SSM path prefix (with leading but not trailing slash, e.g. \"/rds/cluster_name\")" -} diff --git a/modules/aurora-postgres/outputs.tf b/modules/aurora-postgres/outputs.tf index a545cd631..3e199f377 100644 --- a/modules/aurora-postgres/outputs.tf +++ b/modules/aurora-postgres/outputs.tf @@ -6,6 +6,7 @@ output "database_name" { output "admin_username" { value = module.aurora_postgres_cluster.master_username description = "Postgres admin username" + sensitive = true } output "master_hostname" { @@ -34,13 +35,20 @@ output "config_map" { database = local.database_name hostname = module.aurora_postgres_cluster.master_host port = var.database_port + endpoint = module.aurora_postgres_cluster.endpoint username = module.aurora_postgres_cluster.master_username - password_ssm_key = format("%s/%s", local.ssm_cluster_key_prefix, "admin_password") + password_ssm_key = local.admin_password_key } description = "Map containing information pertinent to a PostgreSQL client configuration." 
+ sensitive = true } -output "additional_users" { - value = local.sanitized_additional_users - description = "Information about additional DB users created by request" +output "kms_key_arn" { + value = module.kms_key_rds.key_arn + description = "KMS key ARN for Aurora Postgres" +} + +output "allowed_security_groups" { + value = local.allowed_security_groups + description = "The resulting list of security group IDs that are allowed to connect to the Aurora Postgres cluster." } diff --git a/modules/aurora-postgres/providers.tf b/modules/aurora-postgres/providers.tf index af20eae2a..ef923e10a 100644 --- a/modules/aurora-postgres/providers.tf +++ b/modules/aurora-postgres/providers.tf @@ -1,11 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -14,22 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} - -provider "postgresql" { - host = module.aurora_postgres_cluster.master_host - username = module.aurora_postgres_cluster.master_username - password = local.admin_password - superuser = false -} diff --git a/modules/aurora-postgres/read-only-user.tf b/modules/aurora-postgres/read-only-user.tf deleted file mode 100644 index 1fdaeb70f..000000000 --- a/modules/aurora-postgres/read-only-user.tf +++ /dev/null @@ -1,136 +0,0 @@ -locals { - ro_users_enabled = local.enabled && var.read_only_users_enabled - ro_user_ssm_path_prefix = format("%v/read_only", local.ssm_path_prefix) - - cluster_ro_user = "cluster_ro" - all_databases = local.ro_users_enabled ? toset(compact(concat([var.database_name], tolist(var.additional_databases)))) : [] - all_db_ro_grants = { for db in local.all_databases : db => [ - { - grant : ["CONNECT"] - db : db - schema : null - object_type : "database" - }, - { - grant : ["USAGE"] - db : db - schema : "public" - object_type : "schema" - }, - { - grant : ["SELECT"] - db : db - schema : "public" - object_type : "table" - }, - ] } - - # Need a placeholder for the derived admin_user so that we can use users_map in for_each - admin_user_placeholder = "+ADMIN_USER+" - - # Map each db user to the list of databases they have access to - user_dbs = merge({ for service, v in var.additional_users : v.db_user => distinct([for g in v.grants : g.db if g.object_type == "database"]) }, - { (local.admin_user_placeholder) = local.all_databases }) - # Flatten user_dbs into a list of (db, user) pairs - users_map = merge(flatten([for u, dbs in local.user_dbs : { for db in dbs : "${db}_${u}" => { - user = u - db = db - } }])...) - - read_only_users = local.ro_users_enabled ? 
merge(module.read_only_db_users, - { - cluster = module.read_only_cluster_user[0] - } - ) : {} - sanitized_ro_users = { - for k, v in local.read_only_users : - k => { for kk, vv in v : kk => vv if kk != "db_user_password" } - } -} - -variable "read_only_users_enabled" { - type = bool - default = false - description = "Set `true` to automatically create read-only users for every database" -} - -module "read_only_db_users" { - for_each = local.all_db_ro_grants - source = "./modules/postgresql-user" - - service_name = each.key - db_user = "${each.key}_ro" - db_password = "" - ssm_path_prefix = local.ro_user_ssm_path_prefix - - grants = each.value - - depends_on = [ - module.aurora_postgres_cluster.cluster_identifier, - postgresql_database.additional, - ] - - context = module.this.context -} - -module "read_only_cluster_user" { - count = local.ro_users_enabled ? 1 : 0 - source = "./modules/postgresql-user" - - service_name = "cluster" - db_user = local.cluster_ro_user - db_password = "" - ssm_path_prefix = local.ro_user_ssm_path_prefix - - grants = flatten(values(local.all_db_ro_grants)) - - depends_on = [ - module.aurora_postgres_cluster.cluster_identifier, - postgresql_database.additional, - ] - - context = module.this.context -} - -# For every user with access to a database, ensure that user by default -# grants "SELECT" privileges to every table they create to the db RO user -resource "postgresql_default_privileges" "read_only_tables_users" { - for_each = local.users_map - - role = "${each.value.db}_ro" - database = each.value.db - schema = "public" - - owner = each.value.user == local.admin_user_placeholder ? local.admin_user : each.value.user - object_type = "table" - privileges = ["SELECT"] - - depends_on = [ - module.read_only_db_users, - module.read_only_cluster_user, - ] -} - -# For every user with access to a database, ensure that user by default -# grants "SELECT" privileges to every table they create to the cluster_ro user -resource "postgresql_default_privileges" "read_only_tables_cluster" { - for_each = local.users_map - - role = local.cluster_ro_user - database = each.value.db - schema = "public" - - owner = each.value.user == local.admin_user_placeholder ? local.admin_user : each.value.user - object_type = "table" - privileges = ["SELECT"] - - depends_on = [ - module.read_only_db_users, - module.read_only_cluster_user, - ] -} - -output "read_only_users" { - value = local.ro_users_enabled ? local.sanitized_ro_users : null - description = "List of read only users." -} diff --git a/modules/aurora-postgres/remote-state.tf b/modules/aurora-postgres/remote-state.tf index 69598d5b1..00b7a886e 100644 --- a/modules/aurora-postgres/remote-state.tf +++ b/modules/aurora-postgres/remote-state.tf @@ -1,34 +1,44 @@ module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.0" + version = "1.5.0" - component = "vpc" + component = var.vpc_component_name context = module.cluster.context } -module "vpc_spacelift" { +module "vpc_ingress" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.0" + version = "1.5.0" - component = "vpc" - stage = var.vpc_spacelift_stage_name + for_each = { + for i, account in var.allow_ingress_from_vpc_accounts : + try(account.tenant, module.this.tenant) != null ? 
+ format("%s-%s", account.tenant, account.stage) : account.stage => account + } + + component = each.value.vpc + tenant = try(each.value.tenant, module.this.tenant) + environment = try(each.value.environment, module.this.environment) + stage = try(each.value.stage, module.this.stage) context = module.cluster.context } module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.0" + version = "1.5.0" - component = "eks" + for_each = local.eks_security_group_enabled ? var.eks_component_names : toset([]) + component = each.value context = module.cluster.context } + module "dns_gbl_delegated" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.0" + version = "1.5.0" component = "dns-delegated" environment = var.dns_gbl_delegated_environment_name diff --git a/modules/aurora-postgres/ssm.tf b/modules/aurora-postgres/ssm.tf index 397811354..9b74979dd 100644 --- a/modules/aurora-postgres/ssm.tf +++ b/modules/aurora-postgres/ssm.tf @@ -1,81 +1,78 @@ -module "parameter_store_write" { - source = "cloudposse/ssm-parameter-store/aws" - version = "0.8.3" +locals { + ssm_path_prefix = format("/%s/%s", var.ssm_path_prefix, module.cluster.id) - # kms_arn will only be used for SecureString parameters - kms_arn = var.kms_alias_name_ssm # not necessarily ARN β€” alias works too - parameter_write = [ - { - name = format("%s/%s", local.ssm_cluster_key_prefix, "database_name") - value = local.database_name - description = "Aurora Postgres database name" - type = "String" - overwrite = true - }, - { - name = format("%s/%s", local.ssm_cluster_key_prefix, "database_port") - value = var.database_port - description = "Aurora Postgres database name" - type = "String" - overwrite = true - }, + admin_user_key = format("%s/%s/%s", local.ssm_path_prefix, "admin", "user") + admin_password_key = format("%s/%s/%s", local.ssm_path_prefix, "admin", "password") + + cluster_domain = trimprefix(module.aurora_postgres_cluster.endpoint, "${module.aurora_postgres_cluster.cluster_identifier}.cluster-") + + default_parameters = [ { - name = format("%s/%s", local.ssm_cluster_key_prefix, "admin_username") - value = module.aurora_postgres_cluster.master_username - description = "Aurora Postgres admin username" + name = format("%s/%s", local.ssm_path_prefix, "cluster_domain") + value = local.cluster_domain + description = "AWS DNS name under which DB instances are provisioned" type = "String" overwrite = true }, { - name = format("%s/%s", local.ssm_cluster_key_prefix, "admin_password") - value = local.admin_password - description = "Aurora Postgres admin password" + name = format("%s/%s", local.ssm_path_prefix, "db_host") + value = module.aurora_postgres_cluster.master_host + description = "Aurora Postgres DB Master hostname" type = "String" overwrite = true }, { - name = format("%s/%s", local.ssm_cluster_key_prefix, "admin/db_username") - value = module.aurora_postgres_cluster.master_username - description = "Aurora Postgres Username for the admin DB user" + name = format("%s/%s", local.ssm_path_prefix, "db_port") + value = var.database_port + description = "Aurora Postgres DB Master TCP port" type = "String" overwrite = true }, { - name = format("%s/%s", local.ssm_cluster_key_prefix, "admin/db_password") - value = local.admin_password - description = "Aurora Postgres Password for the admin DB user" - type = "SecureString" - overwrite = true - }, - { - name = format("%s/%s", local.ssm_cluster_key_prefix, "master_hostname") - value = 
module.aurora_postgres_cluster.master_host - description = "Aurora Postgres DB Master hostname" + name = format("%s/%s", local.ssm_path_prefix, "cluster_name") + value = module.aurora_postgres_cluster.cluster_identifier + description = "Aurora Postgres DB Cluster Identifier" type = "String" overwrite = true - }, + } + ] + cluster_parameters = var.cluster_size > 0 ? [ { - name = format("%s/%s", local.ssm_cluster_key_prefix, "replicas_hostname") + name = format("%s/%s", local.ssm_path_prefix, "replicas_hostname") value = module.aurora_postgres_cluster.replicas_host description = "Aurora Postgres DB Replicas hostname" type = "String" overwrite = true }, + ] : [] + admin_user_parameters = [ { - name = format("%s/%s", local.ssm_cluster_key_prefix, "cluster_id") - value = module.aurora_postgres_cluster.cluster_identifier - description = "Aurora Postgres DB Cluster Identifier" + name = local.admin_user_key + value = local.admin_user + description = "Aurora Postgres DB admin user" type = "String" overwrite = true }, { - name = format("%s/%s", local.ssm_cluster_key_prefix, "db_port") - value = var.database_port - description = "Aurora Postgres DB Master port" - type = "String" + name = local.admin_password_key + value = local.admin_password + description = "Aurora Postgres DB admin password" + type = "SecureString" overwrite = true } ] - context = module.this.context + parameter_write = concat(local.default_parameters, local.cluster_parameters, local.admin_user_parameters) +} + +module "parameter_store_write" { + source = "cloudposse/ssm-parameter-store/aws" + version = "0.11.0" + + # kms_arn will only be used for SecureString parameters + kms_arn = module.kms_key_rds.key_arn + + parameter_write = local.parameter_write + + context = module.cluster.context } diff --git a/modules/aurora-postgres/variables.tf b/modules/aurora-postgres/variables.tf index 2cb81e38d..ea71e52f0 100644 --- a/modules/aurora-postgres/variables.tf +++ b/modules/aurora-postgres/variables.tf @@ -55,6 +55,18 @@ variable "engine_version" { default = "13.4" } +variable "allow_major_version_upgrade" { + type = bool + default = false + description = "Enable to allow major engine version upgrades when changing engine versions. Defaults to false." +} + +variable "ca_cert_identifier" { + description = "The identifier of the CA certificate for the DB instance" + type = string + default = null +} + variable "engine_mode" { type = string description = "The database engine mode. 
Valid values: `global`, `multimaster`, `parallelquery`, `provisioned`, `serverless`" @@ -66,12 +78,6 @@ variable "cluster_family" { default = "aurora-postgresql13" } -# AWS KMS alias used for encryption/decryption of SSM secure strings -variable "kms_alias_name_ssm" { - default = "alias/aws/ssm" - description = "KMS alias name for SSM" -} - variable "database_port" { type = number description = "Database port" @@ -145,26 +151,6 @@ variable "reader_dns_name_part" { default = "reader" } -variable "additional_databases" { - type = set(string) - default = [] -} - -variable "additional_users" { - # map key is service name, becomes part of SSM key name - type = map(object({ - db_user : string - db_password : string - grants : list(object({ - grant : list(string) - db : string - schema : string - object_type : string - })) - })) - default = {} -} - variable "ssm_path_prefix" { type = string default = "aurora-postgres" @@ -207,6 +193,12 @@ variable "enhanced_monitoring_role_enabled" { default = true } +variable "enhanced_monitoring_attributes" { + type = list(string) + description = "Attributes used to format the Enhanced Monitoring IAM role. If this role hits IAM role length restrictions (max 64 characters), consider shortening these strings." + default = ["enhanced-monitoring"] +} + variable "rds_monitoring_interval" { type = number description = "The interval, in seconds, between points when enhanced monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid Values: 0, 1, 5, 10, 15, 30, 60" @@ -267,9 +259,96 @@ variable "snapshot_identifier" { description = "Specifies whether or not to create this cluster from a snapshot" } -variable "vpc_spacelift_stage_name" { +variable "allowed_security_group_names" { + type = list(string) + description = "List of security group names (tags) that should be allowed access to the database" + default = [] +} + +variable "eks_security_group_enabled" { + type = bool + description = "Use the eks default security group" + default = false +} + +variable "eks_component_names" { + type = set(string) + description = "The names of the eks components" + default = ["eks/cluster"] +} + +variable "allow_ingress_from_vpc_accounts" { + type = list(object({ + vpc = optional(string, "vpc") + environment = optional(string) + stage = optional(string) + tenant = optional(string) + })) + default = [] + description = <<-EOF + List of account contexts to pull VPC ingress CIDR and add to cluster security group. + e.g. + { + environment = "ue2", + stage = "auto", + tenant = "core" + } + + Defaults to the "vpc" component in the given account + EOF +} + +variable "vpc_component_name" { type = string - default = "auto" - description = "The name of the stage of the `vpc` component where `spacelift-worker-pool` is provisioned" + default = "vpc" + description = "The name of the VPC component" +} + +variable "scaling_configuration" { + type = list(object({ + auto_pause = bool + max_capacity = number + min_capacity = number + seconds_until_auto_pause = number + timeout_action = string + })) + default = [] + description = "List of nested attributes with scaling properties. Only valid when `engine_mode` is set to `serverless`. This is required for Serverless v1" } +variable "serverlessv2_scaling_configuration" { + type = object({ + min_capacity = number + max_capacity = number + }) + default = null + description = "Nested attribute with scaling properties for ServerlessV2. 
Only valid when `engine_mode` is set to `provisioned.` This is required for Serverless v2" +} + +variable "intra_security_group_traffic_enabled" { + type = bool + default = false + description = "Whether to allow traffic between resources inside the database's security group." +} + +variable "cluster_parameters" { + type = list(object({ + apply_method = string + name = string + value = string + })) + default = [] + description = "List of DB cluster parameters to apply" +} + +variable "retention_period" { + type = number + default = 5 + description = "Number of days to retain backups for" +} + +variable "backup_window" { + type = string + default = "07:00-09:00" + description = "Daily time range during which the backups happen, UTC" +} diff --git a/modules/aurora-postgres/versions.tf b/modules/aurora-postgres/versions.tf index ecb1ce1aa..6b2f61ae6 100644 --- a/modules/aurora-postgres/versions.tf +++ b/modules/aurora-postgres/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 1.0.0" + required_version = ">= 1.3.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.0" + version = ">= 4.9.0" } random = { source = "hashicorp/random" @@ -12,7 +12,7 @@ terraform { } postgresql = { source = "cyrilgdn/postgresql" - version = ">= 1.14.0" + version = ">= 1.17.1" } } } diff --git a/modules/auth0/app/README.md b/modules/auth0/app/README.md new file mode 100644 index 000000000..87f22e1ed --- /dev/null +++ b/modules/auth0/app/README.md @@ -0,0 +1,147 @@ +# Component: `auth0/app` + +Auth0 Application component. [Auth0](https://auth0.com/docs/) is a third-party service that provides authentication and +authorization as a service. It is typically used to to authenticate users. + +An Auth0 application is a client that can request authentication and authorization from an Auth0 server. Auth0 +applications can be of different types, such as regular web applications, single-page applications, machine-to-machine +applications, and others. Each application has a set of allowed origins, allowed callback URLs, and allowed web origins. + +## Usage + +Before deploying this component, you need to deploy the `auth0/tenant` component. This components with authenticate with +the [Auth0 Terraform provider](https://registry.terraform.io/providers/auth0/auth0/latest/) using the Auth0 tenant's +client ID and client secret configured with the `auth0/tenant` component. + +**Stack Level**: Global + +Here's an example snippet for how to use this component. + +> [!IMPORTANT] +> +> Be sure that the context ID does not overlap with the context ID of other Auth0 components, such as `auth0/tenant`. We +> use this ID to generate the SSM parameter names. + +```yaml +# stacks/catalog/auth0/app.yaml +components: + terraform: + auth0/app: + vars: + enabled: true + name: "auth0-app" + + # We can centralize plat-sandbox, plat-dev, and plat-staging all use a "nonprod" Auth0 tenant, which is deployed in plat-staging. 
+ auth0_tenant_stage_name: "plat-staging" + + # Common client configuration + grant_types: + - "authorization_code" + - "refresh_token" + - "implicit" + - "client_credentials" + + # Stage-specific client configuration + callbacks: + - "https://auth.acme-dev.com/login/auth0/callback" + allowed_origins: + - "https://*.acme-dev.com" + web_origins: + - "https://portal.acme-dev.com" + - "https://auth.acme-dev.com" +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [auth0](#requirement\_auth0) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | + +## Providers + +| Name | Version | +|------|---------| +| [auth0](#provider\_auth0) | >= 1.0.0 | +| [aws.auth0\_provider](#provider\_aws.auth0\_provider) | >= 4.9.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [auth0\_ssm\_parameters](#module\_auth0\_ssm\_parameters) | cloudposse/ssm-parameter-store/aws | 0.13.0 | +| [auth0\_tenant](#module\_auth0\_tenant) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [iam\_roles\_auth0\_provider](#module\_iam\_roles\_auth0\_provider) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [auth0_client.this](https://registry.terraform.io/providers/auth0/auth0/latest/docs/resources/client) | resource | +| [auth0_client_credentials.this](https://registry.terraform.io/providers/auth0/auth0/latest/docs/resources/client_credentials) | resource | +| [aws_ssm_parameter.auth0_client_id](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.auth0_client_secret](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.auth0_domain](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [allowed\_origins](#input\_allowed\_origins) | Allowed Origins | `list(string)` | `[]` | no | +| [app\_type](#input\_app\_type) | Auth0 Application Type | `string` | `"regular_web"` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [auth0\_debug](#input\_auth0\_debug) | Enable debug mode for the Auth0 provider | `bool` | `true` | no | +| [auth0\_tenant\_component\_name](#input\_auth0\_tenant\_component\_name) | The name of the Auth0 tenant component | `string` | `"auth0/tenant"` | no | +| [auth0\_tenant\_environment\_name](#input\_auth0\_tenant\_environment\_name) | The name of the environment where the Auth0 tenant component is deployed. Defaults to the environment of the current stack. | `string` | `""` | no | +| [auth0\_tenant\_stage\_name](#input\_auth0\_tenant\_stage\_name) | The name of the stage where the Auth0 tenant component is deployed. Defaults to the stage of the current stack. | `string` | `""` | no | +| [auth0\_tenant\_tenant\_name](#input\_auth0\_tenant\_tenant\_name) | The name of the tenant where the Auth0 tenant component is deployed. Yes, this is a bit redundant, since Auth0 also calls this resource a tenant. Defaults to the tenant of the current stack. | `string` | `""` | no | +| [authentication\_method](#input\_authentication\_method) | The authentication method for the client credentials | `string` | `"client_secret_post"` | no | +| [callbacks](#input\_callbacks) | Allowed Callback URLs | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [grant\_types](#input\_grant\_types) | Allowed Grant Types | `list(string)` | `[]` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [jwt\_alg](#input\_jwt\_alg) | JWT Algorithm | `string` | `"RS256"` | no | +| [jwt\_lifetime\_in\_seconds](#input\_jwt\_lifetime\_in\_seconds) | JWT Lifetime in Seconds | `number` | `36000` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [logo\_uri](#input\_logo\_uri) | Logo URI | `string` | `"https://cloudposse.com/wp-content/uploads/2017/07/CloudPosse2-TRANSAPRENT.png"` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [oidc\_conformant](#input\_oidc\_conformant) | OIDC Conformant | `bool` | `true` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [ssm\_base\_path](#input\_ssm\_base\_path) | The base path for the SSM parameters. If not defined, this is set to the module context ID. This is also required when `var.enabled` is set to `false` | `string` | `""` | no | +| [sso](#input\_sso) | Single Sign-On for the Auth0 app | `bool` | `true` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [web\_origins](#input\_web\_origins) | Allowed Web Origins | `list(string)` | `[]` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [auth0\_client\_id](#output\_auth0\_client\_id) | The Auth0 Application Client ID | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/auth0) - + Cloud Posse's upstream component +- [Auth0 Terraform Provider](https://registry.terraform.io/providers/auth0/auth0/latest/) +- [Auth0 Documentation](https://auth0.com/docs/) + +[](https://cpco.io/component) diff --git a/modules/auth0/app/context.tf b/modules/auth0/app/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/auth0/app/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/auth0/app/main.tf b/modules/auth0/app/main.tf new file mode 100644 index 000000000..294a4db55 --- /dev/null +++ b/modules/auth0/app/main.tf @@ -0,0 +1,62 @@ +locals { + enabled = module.this.enabled + + ssm_path = coalesce(var.ssm_base_path, module.this.id) + client_id_ssm_path = format("/%s/client_id", local.ssm_path) + client_secret_ssm_path = format("/%s/client_secret", local.ssm_path) +} + +resource "auth0_client" "this" { + count = local.enabled ? 1 : 0 + + name = module.this.id + + app_type = var.app_type + oidc_conformant = var.oidc_conformant + sso = var.sso + + jwt_configuration { + lifetime_in_seconds = var.jwt_lifetime_in_seconds + alg = var.jwt_alg + } + + callbacks = var.callbacks + allowed_origins = var.allowed_origins + web_origins = var.web_origins + grant_types = var.grant_types + logo_uri = var.logo_uri + +} + +resource "auth0_client_credentials" "this" { + count = local.enabled ? 
1 : 0 + + client_id = try(auth0_client.this[0].client_id, "") + authentication_method = var.authentication_method +} + +module "auth0_ssm_parameters" { + source = "cloudposse/ssm-parameter-store/aws" + version = "0.13.0" + + enabled = local.enabled + + parameter_write = [ + { + name = local.client_id_ssm_path + value = try(auth0_client.this[0].client_id, "") + type = "SecureString" + overwrite = "true" + description = "Auth0 client ID for the Auth0 ${module.this.id} application" + }, + { + name = local.client_secret_ssm_path + value = try(auth0_client_credentials.this[0].client_secret, "") + type = "SecureString" + overwrite = "true" + description = "Auth0 client secret for the Auth0 ${module.this.id} application" + } + ] + + context = module.this.context +} diff --git a/modules/auth0/app/outputs.tf b/modules/auth0/app/outputs.tf new file mode 100644 index 000000000..f169d9e94 --- /dev/null +++ b/modules/auth0/app/outputs.tf @@ -0,0 +1,4 @@ +output "auth0_client_id" { + value = auth0_client.this[0].client_id + description = "The Auth0 Application Client ID" +} diff --git a/modules/auth0/app/provider-auth0-client.tf b/modules/auth0/app/provider-auth0-client.tf new file mode 100644 index 000000000..f7fb49f27 --- /dev/null +++ b/modules/auth0/app/provider-auth0-client.tf @@ -0,0 +1,107 @@ +# +# Fetch the Auth0 tenant component deployment in some stack +# +variable "auth0_tenant_component_name" { + description = "The name of the component" + type = string + default = "auth0/tenant" +} + +variable "auth0_tenant_environment_name" { + description = "The name of the environment where the Auth0 tenant component is deployed. Defaults to the environment of the current stack." + type = string + default = "" +} + +variable "auth0_tenant_stage_name" { + description = "The name of the stage where the Auth0 tenant component is deployed. Defaults to the stage of the current stack." + type = string + default = "" +} + +variable "auth0_tenant_tenant_name" { + description = "The name of the tenant where the Auth0 tenant component is deployed. Yes this is a bit redundant, since Auth0 also calls this resource a tenant. Defaults to the tenant of the current stack." + type = string + default = "" +} + +locals { + auth0_tenant_environment_name = length(var.auth0_tenant_environment_name) > 0 ? var.auth0_tenant_environment_name : module.this.environment + auth0_tenant_stage_name = length(var.auth0_tenant_stage_name) > 0 ? var.auth0_tenant_stage_name : module.this.stage + auth0_tenant_tenant_name = length(var.auth0_tenant_tenant_name) > 0 ? var.auth0_tenant_tenant_name : module.this.tenant +} + +module "auth0_tenant" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + count = local.enabled ? 1 : 0 + + component = var.auth0_tenant_component_name + + environment = local.auth0_tenant_environment_name + stage = local.auth0_tenant_stage_name + tenant = local.auth0_tenant_tenant_name +} + +# +# Set up the AWS provider to access AWS SSM parameters in the same account as the Auth0 tenant +# + +provider "aws" { + alias = "auth0_provider" + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles_auth0_provider.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles_auth0_provider.terraform_role_arn may be null, in which case do not assume a role. 
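+      # `compact()` drops null/empty values, so `for_each` receives either an empty list
+      # (no `assume_role` block is rendered and the ambient credentials/profile are used)
+      # or a single-element list (exactly one `assume_role` block with that role ARN).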
+ for_each = compact([module.iam_roles_auth0_provider.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles_auth0_provider" { + source = "../../account-map/modules/iam-roles" + + environment = local.auth0_tenant_environment_name + stage = local.auth0_tenant_stage_name + tenant = local.auth0_tenant_tenant_name + + context = module.this.context +} + +data "aws_ssm_parameter" "auth0_domain" { + provider = aws.auth0_provider + name = module.auth0_tenant[0].outputs.domain_ssm_path +} + +data "aws_ssm_parameter" "auth0_client_id" { + provider = aws.auth0_provider + name = module.auth0_tenant[0].outputs.client_id_ssm_path +} + +data "aws_ssm_parameter" "auth0_client_secret" { + provider = aws.auth0_provider + name = module.auth0_tenant[0].outputs.client_secret_ssm_path +} + +# +# Initialize the Auth0 provider with the Auth0 domain, client ID, and client secret from that deployment +# + +variable "auth0_debug" { + type = bool + description = "Enable debug mode for the Auth0 provider" + default = true +} + +provider "auth0" { + domain = data.aws_ssm_parameter.auth0_domain.value + client_id = data.aws_ssm_parameter.auth0_client_id.value + client_secret = data.aws_ssm_parameter.auth0_client_secret.value + debug = var.auth0_debug +} diff --git a/modules/auth0/app/providers.tf b/modules/auth0/app/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/auth0/app/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/auth0/app/variables.tf b/modules/auth0/app/variables.tf new file mode 100644 index 000000000..62fb09e71 --- /dev/null +++ b/modules/auth0/app/variables.tf @@ -0,0 +1,76 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "callbacks" { + type = list(string) + description = "Allowed Callback URLs" + default = [] +} + +variable "allowed_origins" { + type = list(string) + description = "Allowed Origins" + default = [] +} + +variable "web_origins" { + type = list(string) + description = "Allowed Web Origins" + default = [] +} + +variable "grant_types" { + type = list(string) + description = "Allowed Grant Types" + default = [] +} + +variable "logo_uri" { + type = string + description = "Logo URI" + default = "https://cloudposse.com/wp-content/uploads/2017/07/CloudPosse2-TRANSAPRENT.png" +} + +variable "app_type" { + type = string + description = "Auth0 Application Type" + default = "regular_web" +} + +variable "oidc_conformant" { + type = bool + description = "OIDC Conformant" + default = true +} + +variable "sso" { + type = bool + description = "Single Sign-On for the Auth0 app" + default = true +} + +variable "jwt_lifetime_in_seconds" { + type = number + description = "JWT Lifetime in Seconds" + default = 36000 +} + +variable "jwt_alg" { + type = string + description = "JWT Algorithm" + default = "RS256" +} + +variable "ssm_base_path" { + type = string + description = "The base path for the SSM parameters. 
If not defined, this is set to the module context ID. This is also required when `var.enabled` is set to `false`"
+  default     = ""
+}
+
+variable "authentication_method" {
+  type        = string
+  description = "The authentication method for the client credentials"
+  default     = "client_secret_post"
+}
diff --git a/modules/auth0/app/versions.tf b/modules/auth0/app/versions.tf
new file mode 100644
index 000000000..3894f08a9
--- /dev/null
+++ b/modules/auth0/app/versions.tf
@@ -0,0 +1,14 @@
+terraform {
+  required_version = ">= 1.0.0"
+
+  required_providers {
+    auth0 = {
+      source  = "auth0/auth0"
+      version = ">= 1.0.0"
+    }
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 4.9.0"
+    }
+  }
+}
diff --git a/modules/auth0/connection/README.md b/modules/auth0/connection/README.md
new file mode 100644
index 000000000..ddeeb5298
--- /dev/null
+++ b/modules/auth0/connection/README.md
@@ -0,0 +1,152 @@
+# Component: `auth0/connection`
+
+Auth0 Connection component. [Auth0](https://auth0.com/docs/) is a third-party service that provides authentication and
+authorization as a service. It is typically used to authenticate users.
+
+An Auth0 connection is a bridge between Auth0 and an identity provider (IdP) that allows your application to
+authenticate users. Auth0 supports many types of connections, including social identity providers such as Google,
+Facebook, and Twitter, enterprise identity providers such as Microsoft Azure AD, and passwordless authentication methods
+such as email and SMS.
+
+## Usage
+
+Before deploying this component, you need to deploy the `auth0/tenant` component. This component authenticates with
+the [Auth0 Terraform provider](https://registry.terraform.io/providers/auth0/auth0/latest/) using the Auth0 tenant's
+client ID and client secret configured by the `auth0/tenant` component.
+
+**Stack Level**: Global
+
+Here's an example snippet for how to use this component.
+ +```yaml +# stacks/catalog/auth0/connection.yaml +components: + terraform: + auth0/connection: + vars: + enabled: true + name: "auth0" + + # These must all be specified for the connection to be created + strategy: "email" + connection_name: "email" + options_name: "email" + + email_from: "{{`{{ application.name }}`}} " + email_subject: "Welcome to {{`{{ application.name }}`}}" + syntax: "liquid" + + auth_params: + scope: "openid profile" + response_type: "code" + + totp: + time_step: 895 + length: 6 + + template_file: "templates/email.html" + + # Stage-specific configuration + auth0_app_connections: + - stage: sandbox + - stage: dev + - stage: staging +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [auth0](#requirement\_auth0) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | + +## Providers + +| Name | Version | +|------|---------| +| [auth0](#provider\_auth0) | >= 1.0.0 | +| [aws.auth0\_provider](#provider\_aws.auth0\_provider) | >= 4.9.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [auth0\_apps](#module\_auth0\_apps) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [auth0\_tenant](#module\_auth0\_tenant) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [iam\_roles\_auth0\_provider](#module\_iam\_roles\_auth0\_provider) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [auth0_connection.this](https://registry.terraform.io/providers/auth0/auth0/latest/docs/resources/connection) | resource | +| [auth0_connection_clients.this](https://registry.terraform.io/providers/auth0/auth0/latest/docs/resources/connection_clients) | resource | +| [aws_ssm_parameter.auth0_client_id](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.auth0_client_secret](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.auth0_domain](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [auth0\_app\_connections](#input\_auth0\_app\_connections) | The list of Auth0 apps to add to this connection |
list(object({
component = optional(string, "auth0/app")
environment = optional(string, "")
stage = optional(string, "")
tenant = optional(string, "")
}))
| `[]` | no | +| [auth0\_debug](#input\_auth0\_debug) | Enable debug mode for the Auth0 provider | `bool` | `true` | no | +| [auth0\_tenant\_component\_name](#input\_auth0\_tenant\_component\_name) | The name of the component | `string` | `"auth0/tenant"` | no | +| [auth0\_tenant\_environment\_name](#input\_auth0\_tenant\_environment\_name) | The name of the environment where the Auth0 tenant component is deployed. Defaults to the environment of the current stack. | `string` | `""` | no | +| [auth0\_tenant\_stage\_name](#input\_auth0\_tenant\_stage\_name) | The name of the stage where the Auth0 tenant component is deployed. Defaults to the stage of the current stack. | `string` | `""` | no | +| [auth0\_tenant\_tenant\_name](#input\_auth0\_tenant\_tenant\_name) | The name of the tenant where the Auth0 tenant component is deployed. Yes this is a bit redundant, since Auth0 also calls this resource a tenant. Defaults to the tenant of the current stack. | `string` | `""` | no | +| [auth\_params](#input\_auth\_params) | Query string parameters to be included as part of the generated passwordless email link. |
object({
scope = optional(string, null)
response_type = optional(string, null)
})
| `{}` | no | +| [brute\_force\_protection](#input\_brute\_force\_protection) | Indicates whether to enable brute force protection, which will limit the number of signups and failed logins from a suspicious IP address. | `bool` | `true` | no | +| [connection\_name](#input\_connection\_name) | The name of the connection | `string` | `""` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [disable\_signup](#input\_disable\_signup) | Indicates whether to allow user sign-ups to your application. | `bool` | `false` | no | +| [email\_from](#input\_email\_from) | When using an email strategy, the address to use as the sender | `string` | `null` | no | +| [email\_subject](#input\_email\_subject) | When using an email strategy, the subject of the email | `string` | `null` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [non\_persistent\_attrs](#input\_non\_persistent\_attrs) | If there are user fields that should not be stored in Auth0 databases due to privacy reasons, you can add them to the DenyList here. | `list(string)` | `[]` | no | +| [options\_name](#input\_options\_name) | The name of the connection options. Required for the email strategy. | `string` | `""` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [set\_user\_root\_attributes](#input\_set\_user\_root\_attributes) | Determines whether to sync user profile attributes at each login or only on the first login. Options include: `on_each_login`, `on_first_login`. | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [strategy](#input\_strategy) | The strategy to use for the connection | `string` | `"auth0"` | no | +| [syntax](#input\_syntax) | The syntax of the template body | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [template](#input\_template) | The template to use for the connection. If not provided, the `template_file` variable must be set. | `string` | `""` | no | +| [template\_file](#input\_template\_file) | The path to the template file. If not provided, the `template` variable must be set. | `string` | `""` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [totp](#input\_totp) | The TOTP settings for the connection |
object({
time_step = optional(number, 900)
length = optional(number, 6)
})
| `{}` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [auth0\_connection\_id](#output\_auth0\_connection\_id) | The Auth0 Connection ID | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/auth0/connection) - + Cloud Posse's upstream component +- [Auth0 Terraform Provider](https://registry.terraform.io/providers/auth0/auth0/latest/) +- [Auth0 Documentation](https://auth0.com/docs/) + +[](https://cpco.io/component) diff --git a/modules/auth0/connection/context.tf b/modules/auth0/connection/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/auth0/connection/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. 
+ Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. 
+ This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/auth0/connection/main.tf b/modules/auth0/connection/main.tf new file mode 100644 index 000000000..2451cf70d --- /dev/null +++ b/modules/auth0/connection/main.tf @@ -0,0 +1,41 @@ +locals { + enabled = module.this.enabled + + # If var.template is set, use it. + # Otherwise, if var.template_file is set, use that file content. + # Otherwise, use null. + template = length(var.template) > 0 ? var.template : length(var.template_file) > 0 ? file("${path.module}/${var.template_file}") : null +} + +resource "auth0_connection" "this" { + count = local.enabled ? 1 : 0 + + strategy = var.strategy + name = length(var.connection_name) > 0 ? var.connection_name : module.this.name + + options { + name = var.options_name + from = var.email_from + subject = var.email_subject + syntax = var.syntax + template = local.template + + disable_signup = var.disable_signup + brute_force_protection = var.brute_force_protection + set_user_root_attributes = var.set_user_root_attributes + non_persistent_attrs = var.non_persistent_attrs + auth_params = var.auth_params + + totp { + time_step = var.totp.time_step + length = var.totp.length + } + } +} + +resource "auth0_connection_clients" "this" { + count = local.enabled ? 1 : 0 + + connection_id = auth0_connection.this[0].id + enabled_clients = length(module.auth0_apps) > 0 ? [for auth0_app in module.auth0_apps : auth0_app.outputs.auth0_client_id] : [] +} diff --git a/modules/auth0/connection/outputs.tf b/modules/auth0/connection/outputs.tf new file mode 100644 index 000000000..e1a0f06f1 --- /dev/null +++ b/modules/auth0/connection/outputs.tf @@ -0,0 +1,4 @@ +output "auth0_connection_id" { + value = auth0_connection.this[0].id + description = "The Auth0 Connection ID" +} diff --git a/modules/auth0/connection/provider-auth0-client.tf b/modules/auth0/connection/provider-auth0-client.tf new file mode 100644 index 000000000..f7fb49f27 --- /dev/null +++ b/modules/auth0/connection/provider-auth0-client.tf @@ -0,0 +1,107 @@ +# +# Fetch the Auth0 tenant component deployment in some stack +# +variable "auth0_tenant_component_name" { + description = "The name of the component" + type = string + default = "auth0/tenant" +} + +variable "auth0_tenant_environment_name" { + description = "The name of the environment where the Auth0 tenant component is deployed. Defaults to the environment of the current stack." + type = string + default = "" +} + +variable "auth0_tenant_stage_name" { + description = "The name of the stage where the Auth0 tenant component is deployed. Defaults to the stage of the current stack." + type = string + default = "" +} + +variable "auth0_tenant_tenant_name" { + description = "The name of the tenant where the Auth0 tenant component is deployed. Yes this is a bit redundant, since Auth0 also calls this resource a tenant. Defaults to the tenant of the current stack." + type = string + default = "" +} + +locals { + auth0_tenant_environment_name = length(var.auth0_tenant_environment_name) > 0 ? var.auth0_tenant_environment_name : module.this.environment + auth0_tenant_stage_name = length(var.auth0_tenant_stage_name) > 0 ? var.auth0_tenant_stage_name : module.this.stage + auth0_tenant_tenant_name = length(var.auth0_tenant_tenant_name) > 0 ? var.auth0_tenant_tenant_name : module.this.tenant +} + +module "auth0_tenant" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + count = local.enabled ? 
1 : 0 + + component = var.auth0_tenant_component_name + + environment = local.auth0_tenant_environment_name + stage = local.auth0_tenant_stage_name + tenant = local.auth0_tenant_tenant_name +} + +# +# Set up the AWS provider to access AWS SSM parameters in the same account as the Auth0 tenant +# + +provider "aws" { + alias = "auth0_provider" + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles_auth0_provider.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles_auth0_provider.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles_auth0_provider.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles_auth0_provider" { + source = "../../account-map/modules/iam-roles" + + environment = local.auth0_tenant_environment_name + stage = local.auth0_tenant_stage_name + tenant = local.auth0_tenant_tenant_name + + context = module.this.context +} + +data "aws_ssm_parameter" "auth0_domain" { + provider = aws.auth0_provider + name = module.auth0_tenant[0].outputs.domain_ssm_path +} + +data "aws_ssm_parameter" "auth0_client_id" { + provider = aws.auth0_provider + name = module.auth0_tenant[0].outputs.client_id_ssm_path +} + +data "aws_ssm_parameter" "auth0_client_secret" { + provider = aws.auth0_provider + name = module.auth0_tenant[0].outputs.client_secret_ssm_path +} + +# +# Initialize the Auth0 provider with the Auth0 domain, client ID, and client secret from that deployment +# + +variable "auth0_debug" { + type = bool + description = "Enable debug mode for the Auth0 provider" + default = true +} + +provider "auth0" { + domain = data.aws_ssm_parameter.auth0_domain.value + client_id = data.aws_ssm_parameter.auth0_client_id.value + client_secret = data.aws_ssm_parameter.auth0_client_secret.value + debug = var.auth0_debug +} diff --git a/modules/auth0/connection/providers.tf b/modules/auth0/connection/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/auth0/connection/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/auth0/connection/remote-state.tf b/modules/auth0/connection/remote-state.tf new file mode 100644 index 000000000..068ecc6b7 --- /dev/null +++ b/modules/auth0/connection/remote-state.tf @@ -0,0 +1,11 @@ +module "auth0_apps" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + for_each = local.enabled ? { for app in var.auth0_app_connections : "${app.tenant}-${app.environment}-${app.stage}-${app.component}" => app } : {} + + component = each.value.component + tenant = length(each.value.tenant) > 0 ? each.value.tenant : module.this.tenant + environment = length(each.value.environment) > 0 ? each.value.environment : module.this.environment + stage = length(each.value.stage) > 0 ? 
each.value.stage : module.this.stage +} diff --git a/modules/auth0/connection/templates/.gitkeep b/modules/auth0/connection/templates/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/modules/auth0/connection/variables.tf b/modules/auth0/connection/variables.tf new file mode 100644 index 000000000..748d3bb19 --- /dev/null +++ b/modules/auth0/connection/variables.tf @@ -0,0 +1,105 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "auth0_app_connections" { + type = list(object({ + component = optional(string, "auth0/app") + environment = optional(string, "") + stage = optional(string, "") + tenant = optional(string, "") + })) + default = [] + description = "The list of Auth0 apps to add to this connection" +} + +variable "strategy" { + type = string + description = "The strategy to use for the connection" + default = "auth0" +} + +variable "connection_name" { + type = string + description = "The name of the connection" + default = "" +} + +variable "options_name" { + type = string + description = "The name of the connection options. Required for the email strategy." + default = "" +} + +variable "email_from" { + type = string + description = "When using an email strategy, the address to use as the sender" + default = null +} + +variable "email_subject" { + type = string + description = "When using an email strategy, the subject of the email" + default = null +} + +variable "syntax" { + type = string + description = "The syntax of the template body" + default = null +} + +variable "disable_signup" { + type = bool + description = "Indicates whether to allow user sign-ups to your application." + default = false +} + +variable "brute_force_protection" { + type = bool + description = "Indicates whether to enable brute force protection, which will limit the number of signups and failed logins from a suspicious IP address." + default = true +} + +variable "set_user_root_attributes" { + type = string + description = "Determines whether to sync user profile attributes at each login or only on the first login. Options include: `on_each_login`, `on_first_login`." + default = null +} + +variable "non_persistent_attrs" { + type = list(string) + description = "If there are user fields that should not be stored in Auth0 databases due to privacy reasons, you can add them to the DenyList here." + default = [] +} + +variable "auth_params" { + type = object({ + scope = optional(string, null) + response_type = optional(string, null) + }) + description = "Query string parameters to be included as part of the generated passwordless email link." + default = {} +} + +variable "totp" { + type = object({ + time_step = optional(number, 900) + length = optional(number, 6) + }) + description = "The TOTP settings for the connection" + default = {} +} + +variable "template_file" { + type = string + description = "The path to the template file. If not provided, the `template` variable must be set." + default = "" +} + +variable "template" { + type = string + description = "The template to use for the connection. If not provided, the `template_file` variable must be set." 
+  default     = ""
+}
diff --git a/modules/auth0/connection/versions.tf b/modules/auth0/connection/versions.tf
new file mode 100644
index 000000000..3894f08a9
--- /dev/null
+++ b/modules/auth0/connection/versions.tf
@@ -0,0 +1,14 @@
+terraform {
+  required_version = ">= 1.0.0"
+
+  required_providers {
+    auth0 = {
+      source  = "auth0/auth0"
+      version = ">= 1.0.0"
+    }
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 4.9.0"
+    }
+  }
+}
diff --git a/modules/auth0/tenant/README.md b/modules/auth0/tenant/README.md
new file mode 100644
index 000000000..171ce4e0c
--- /dev/null
+++ b/modules/auth0/tenant/README.md
@@ -0,0 +1,182 @@
+# Component: `auth0/tenant`
+
+This component configures an [Auth0](https://auth0.com/docs/) tenant. It configures authentication for the Auth0
+Terraform provider and manages the settings of the Auth0 tenant itself.
+
+## Usage
+
+**Stack Level**: Global
+
+Here's an example snippet for how to use this component.
+
+```yaml
+# catalog/auth0/tenant.yaml
+components:
+  terraform:
+    auth0/tenant:
+      vars:
+        enabled: true
+        # Make sure this name does not conflict with other Auth0 components, such as `auth0/app`
+        name: auth0
+        support_email: "tech@acme.com"
+        support_url: "https://acme.com"
+```
+
+### Auth0 Tenant Creation
+
+Chicken before the egg...
+
+The Auth0 tenant must exist before we can manage it with Terraform. In order to create the Auth0 application used by the
+[Auth0 Terraform provider](https://registry.terraform.io/providers/auth0/auth0/latest/), we must first create the Auth0
+tenant. Once the Auth0 provider is configured, we can import the tenant into Terraform. However, the tenant is not a
+resource identifiable by an ID within the Auth0 Management API, so on the first run we import the existing tenant using
+an arbitrary string; the value of that string does not matter. Terraform always operates on the tenant that the Auth0
+provider's application belongs to.
+
+Create the Auth0 tenant now using the Auth0 Management API or the Auth0 Dashboard following
+[the Auth0 create tenants documentation](https://auth0.com/docs/get-started/auth0-overview/create-tenants).
+
+### Provider Pre-requisites
+
+Once the Auth0 tenant is created or you've been given access to an existing tenant, you can configure the Auth0 provider
+in Terraform. Follow the
+[Auth0 provider documentation](https://registry.terraform.io/providers/auth0/auth0/latest/docs/guides/quickstart) to
+create a Machine to Machine application.
+
+> [!TIP]
+>
+> #### Machine to Machine App Name
+>
+> Use the Context Label format for the application name for consistency. For example, `acme-plat-gbl-prod-auth0-provider`.
+
+After creating the Machine to Machine application, add the app's domain, client ID, and client secret to AWS Systems
+Manager Parameter Store in the same account and region as this component deployment.
The paths for the parameters are
+defined by the component deployment's Null Label context ID as follows:
+
+```hcl
+auth0_domain_ssm_path        = "/${module.this.id}/domain"
+auth0_client_id_ssm_path     = "/${module.this.id}/client_id"
+auth0_client_secret_ssm_path = "/${module.this.id}/client_secret"
+```
+
+For example, if we're deploying `auth0/tenant` into `plat-gbl-prod` and our default region is `us-west-2`, then we would
+add the following parameters to the `plat-prod` account in `us-west-2`:
+
+> [!IMPORTANT]
+>
+> Be sure that this AWS SSM parameter path does not conflict with SSM parameters used by other Auth0 components, such as
+> `auth0/app`. In both components, the SSM parameter paths are defined by the component deployment's context ID.
+
+```
+/acme-plat-gbl-prod-auth0/domain
+/acme-plat-gbl-prod-auth0/client_id
+/acme-plat-gbl-prod-auth0/client_secret
+```
+
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 1.0.0 |
+| [auth0](#requirement\_auth0) | >= 1.0.0 |
+| [aws](#requirement\_aws) | >= 4.9.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [auth0](#provider\_auth0) | >= 1.0.0 |
+| [aws](#provider\_aws) | >= 4.9.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [dns\_gbl\_delegated](#module\_dns\_gbl\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 |
+| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a |
+| [this](#module\_this) | cloudposse/label/null | 0.25.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [auth0_custom_domain.this](https://registry.terraform.io/providers/auth0/auth0/latest/docs/resources/custom_domain) | resource |
+| [auth0_custom_domain_verification.this](https://registry.terraform.io/providers/auth0/auth0/latest/docs/resources/custom_domain_verification) | resource |
+| [auth0_email_provider.this](https://registry.terraform.io/providers/auth0/auth0/latest/docs/resources/email_provider) | resource |
+| [auth0_prompt.this](https://registry.terraform.io/providers/auth0/auth0/latest/docs/resources/prompt) | resource |
+| [auth0_tenant.this](https://registry.terraform.io/providers/auth0/auth0/latest/docs/resources/tenant) | resource |
+| [aws_route53_record.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_record) | resource |
+| [aws_ssm_parameter.auth0_client_id](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source |
+| [aws_ssm_parameter.auth0_client_secret](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source |
+| [aws_ssm_parameter.auth0_domain](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source |
+| [aws_ssm_parameter.sendgrid_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [allowed\_logout\_urls](#input\_allowed\_logout\_urls) | The URLs that Auth0 can redirect to after logout. | `list(string)` | `[]` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [auth0\_debug](#input\_auth0\_debug) | Enable debug mode for the Auth0 provider | `bool` | `true` | no | +| [auth0\_prompt\_experience](#input\_auth0\_prompt\_experience) | Which prompt login experience to use. Options include classic and new. | `string` | `"new"` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [default\_redirection\_uri](#input\_default\_redirection\_uri) | The default redirection URI. | `string` | `""` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [disable\_clickjack\_protection\_headers](#input\_disable\_clickjack\_protection\_headers) | Whether to disable clickjack protection headers. | `bool` | `true` | no | +| [disable\_fields\_map\_fix](#input\_disable\_fields\_map\_fix) | Whether to disable fields map fix. | `bool` | `false` | no | +| [disable\_management\_api\_sms\_obfuscation](#input\_disable\_management\_api\_sms\_obfuscation) | Whether to disable management API SMS obfuscation. | `bool` | `false` | no | +| [email\_provider\_default\_from\_address](#input\_email\_provider\_default\_from\_address) | The default from address for the email provider. | `string` | `""` | no | +| [email\_provider\_name](#input\_email\_provider\_name) | The name of the email provider. If not defined, no email provider will be created. | `string` | `""` | no | +| [enable\_public\_signup\_user\_exists\_error](#input\_enable\_public\_signup\_user\_exists\_error) | Whether to enable public signup user exists error. | `bool` | `true` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [enabled\_locales](#input\_enabled\_locales) | The enabled locales. | `list(string)` |
[
"en"
]
| no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [friendly\_name](#input\_friendly\_name) | The friendly name of the Auth0 tenant. If not provided, the module context ID will be used. | `string` | `""` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [idle\_session\_lifetime](#input\_idle\_session\_lifetime) | The idle session lifetime in hours. | `number` | `72` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [no\_disclose\_enterprise\_connections](#input\_no\_disclose\_enterprise\_connections) | Whether to disclose enterprise connections. | `bool` | `false` | no | +| [oidc\_logout\_prompt\_enabled](#input\_oidc\_logout\_prompt\_enabled) | Whether the OIDC logout prompt is enabled. | `bool` | `false` | no | +| [picture\_url](#input\_picture\_url) | The URL of the picture to be displayed in the Auth0 Universal Login page. | `string` | `"https://cloudposse.com/wp-content/uploads/2017/07/CloudPosse2-TRANSAPRENT.png"` | no | +| [provider\_ssm\_base\_path](#input\_provider\_ssm\_base\_path) | The base path for the SSM parameters. If not defined, this is set to the module context ID. This is also required when `var.enabled` is set to `false` | `string` | `""` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [sandbox\_version](#input\_sandbox\_version) | The sandbox version. | `string` | `"18"` | no | +| [sendgrid\_api\_key\_ssm\_path](#input\_sendgrid\_api\_key\_ssm\_path) | The SSM path to the SendGrid API key. Only required if `email_provider_name` is `sendgrid`. | `string` | `""` | no | +| [session\_cookie\_mode](#input\_session\_cookie\_mode) | The session cookie mode. | `string` | `"persistent"` | no | +| [session\_lifetime](#input\_session\_lifetime) | The session lifetime in hours. | `number` | `168` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [support\_email](#input\_support\_email) | The email address to be displayed in the Auth0 Universal Login page. | `string` | n/a | yes | +| [support\_url](#input\_support\_url) | The URL to be displayed in the Auth0 Universal Login page. | `string` | n/a | yes | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [use\_scope\_descriptions\_for\_consent](#input\_use\_scope\_descriptions\_for\_consent) | Whether to use scope descriptions for consent. | `bool` | `false` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [auth0\_domain](#output\_auth0\_domain) | The Auth0 custom domain | +| [client\_id\_ssm\_path](#output\_client\_id\_ssm\_path) | The SSM parameter path for the Auth0 client ID | +| [client\_secret\_ssm\_path](#output\_client\_secret\_ssm\_path) | The SSM parameter path for the Auth0 client secret | +| [domain\_ssm\_path](#output\_domain\_ssm\_path) | The SSM parameter path for the Auth0 domain | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/auth0) - + Cloud Posse's upstream component +- [Auth0 Terraform Provider](https://registry.terraform.io/providers/auth0/auth0/latest/) +- [Auth0 Documentation](https://auth0.com/docs/) + +[](https://cpco.io/component) diff --git a/modules/auth0/tenant/context.tf b/modules/auth0/tenant/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/auth0/tenant/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/auth0/tenant/main.tf b/modules/auth0/tenant/main.tf new file mode 100644 index 000000000..79687d1c8 --- /dev/null +++ b/modules/auth0/tenant/main.tf @@ -0,0 +1,117 @@ +locals { + enabled = module.this.enabled + email_provider_enabled = length(var.email_provider_name) > 0 && local.enabled + + name = length(module.this.name) > 0 ? module.this.name : "auth0" + domain_name = format("%s.%s", local.name, module.dns_gbl_delegated.outputs.default_domain_name) + + friendly_name = length(var.friendly_name) > 0 ? var.friendly_name : module.this.id +} + +# Chicken before the egg... +# +# The tenant must exist before we can manage Auth0 with Terraform, +# but the tenant is not a resource identifiable by an ID within the Auth0 Management API! +# +# However, we can import it using a random string. On first run, we import the existing tenant +# using a random string. It does not matter what this value is. Terraform will use the same +# tenant as the Auth0 application for the Terraform Auth0 Provider. +# +# https://registry.terraform.io/providers/auth0/auth0/latest/docs/resources/tenant#import +import { + to = auth0_tenant.this[0] + id = "f6615002-81ff-49f7-afd3-8814d07af4fa" +} + +resource "auth0_tenant" "this" { + count = local.enabled ? 
1 : 0 + + friendly_name = local.friendly_name + picture_url = var.picture_url + support_email = var.support_email + support_url = var.support_url + + allowed_logout_urls = var.allowed_logout_urls + idle_session_lifetime = var.idle_session_lifetime + session_lifetime = var.session_lifetime + sandbox_version = var.sandbox_version + enabled_locales = var.enabled_locales + default_redirection_uri = var.default_redirection_uri + + flags { + disable_clickjack_protection_headers = var.disable_clickjack_protection_headers + enable_public_signup_user_exists_error = var.enable_public_signup_user_exists_error + use_scope_descriptions_for_consent = var.use_scope_descriptions_for_consent + no_disclose_enterprise_connections = var.no_disclose_enterprise_connections + disable_management_api_sms_obfuscation = var.disable_management_api_sms_obfuscation + disable_fields_map_fix = var.disable_fields_map_fix + } + + session_cookie { + mode = var.session_cookie_mode + } + + sessions { + oidc_logout_prompt_enabled = var.oidc_logout_prompt_enabled + } +} + +resource "auth0_custom_domain" "this" { + count = local.enabled ? 1 : 0 + + domain = local.domain_name + type = "auth0_managed_certs" +} + +resource "aws_route53_record" "this" { + count = local.enabled ? 1 : 0 + + zone_id = module.dns_gbl_delegated.outputs.default_dns_zone_id + name = local.domain_name + type = try(upper(auth0_custom_domain.this[0].verification[0].methods[0].name), null) + ttl = "300" + records = local.enabled ? [ + auth0_custom_domain.this[0].verification[0].methods[0].record + ] : [] +} + +resource "auth0_custom_domain_verification" "this" { + count = local.enabled ? 1 : 0 + + custom_domain_id = auth0_custom_domain.this[0].id + + timeouts { + create = "15m" + } + + depends_on = [ + aws_route53_record.this, + ] +} + +resource "auth0_prompt" "this" { + count = local.enabled ? 1 : 0 + + universal_login_experience = var.auth0_prompt_experience +} + +data "aws_ssm_parameter" "sendgrid_api_key" { + count = local.email_provider_enabled ? 1 : 0 + + name = var.sendgrid_api_key_ssm_path +} + +resource "auth0_email_provider" "this" { + count = local.email_provider_enabled ? 1 : 0 + + name = var.email_provider_name + enabled = local.email_provider_enabled + default_from_address = var.email_provider_default_from_address + + dynamic "credentials" { + for_each = var.email_provider_name == "sendgrid" ? ["1"] : [] + content { + api_key = data.aws_ssm_parameter.sendgrid_api_key[0].value + } + } +} diff --git a/modules/auth0/tenant/outputs.tf b/modules/auth0/tenant/outputs.tf new file mode 100644 index 000000000..b51187dc9 --- /dev/null +++ b/modules/auth0/tenant/outputs.tf @@ -0,0 +1,19 @@ +output "domain_ssm_path" { + value = local.auth0_domain_ssm_path + description = "The SSM parameter path for the Auth0 domain" +} + +output "client_id_ssm_path" { + value = local.auth0_client_id_ssm_path + description = "The SSM parameter path for the Auth0 client ID" +} + +output "client_secret_ssm_path" { + value = local.auth0_client_secret_ssm_path + description = "The SSM parameter path for the Auth0 client secret" +} + +output "auth0_domain" { + value = local.domain_name + description = "The Auth0 custom domain" +} diff --git a/modules/auth0/tenant/provider-auth0.tf b/modules/auth0/tenant/provider-auth0.tf new file mode 100644 index 000000000..161849353 --- /dev/null +++ b/modules/auth0/tenant/provider-auth0.tf @@ -0,0 +1,36 @@ +locals { + auth0_domain_ssm_path = local.enabled && length(var.provider_ssm_base_path) == 0 ? 
"/${module.this.id}/domain" : "/${var.provider_ssm_base_path}/domain" + auth0_client_id_ssm_path = local.enabled && length(var.provider_ssm_base_path) == 0 ? "/${module.this.id}/client_id" : "/${var.provider_ssm_base_path}/client_id" + auth0_client_secret_ssm_path = local.enabled && length(var.provider_ssm_base_path) == 0 ? "/${module.this.id}/client_secret" : "/${var.provider_ssm_base_path}/client_secret" +} + +variable "provider_ssm_base_path" { + type = string + description = "The base path for the SSM parameters. If not defined, this is set to the module context ID. This is also required when `var.enabled` is set to `false`" + default = "" +} + +variable "auth0_debug" { + type = bool + description = "Enable debug mode for the Auth0 provider" + default = true +} + +data "aws_ssm_parameter" "auth0_domain" { + name = local.auth0_domain_ssm_path +} + +data "aws_ssm_parameter" "auth0_client_id" { + name = local.auth0_client_id_ssm_path +} + +data "aws_ssm_parameter" "auth0_client_secret" { + name = local.auth0_client_secret_ssm_path +} + +provider "auth0" { + domain = data.aws_ssm_parameter.auth0_domain.value + client_id = data.aws_ssm_parameter.auth0_client_id.value + client_secret = data.aws_ssm_parameter.auth0_client_secret.value + debug = var.auth0_debug +} diff --git a/modules/auth0/tenant/providers.tf b/modules/auth0/tenant/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/auth0/tenant/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/auth0/tenant/remote-state.tf b/modules/auth0/tenant/remote-state.tf new file mode 100644 index 000000000..1921826eb --- /dev/null +++ b/modules/auth0/tenant/remote-state.tf @@ -0,0 +1,9 @@ +module "dns_gbl_delegated" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + environment = "gbl" + component = "dns-delegated" + + context = module.this.context +} diff --git a/modules/auth0/tenant/variables.tf b/modules/auth0/tenant/variables.tf new file mode 100644 index 000000000..960647ab6 --- /dev/null +++ b/modules/auth0/tenant/variables.tf @@ -0,0 +1,134 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "friendly_name" { + type = string + description = "The friendly name of the Auth0 tenant. If not provided, the module context ID will be used." + default = "" +} + +variable "picture_url" { + type = string + description = "The URL of the picture to be displayed in the Auth0 Universal Login page." + default = "https://cloudposse.com/wp-content/uploads/2017/07/CloudPosse2-TRANSAPRENT.png" +} + +variable "support_email" { + type = string + description = "The email address to be displayed in the Auth0 Universal Login page." +} + +variable "support_url" { + type = string + description = "The URL to be displayed in the Auth0 Universal Login page." +} + +variable "allowed_logout_urls" { + type = list(string) + description = "The URLs that Auth0 can redirect to after logout." 
+ default = [] +} + +variable "idle_session_lifetime" { + type = number + description = "The idle session lifetime in hours." + default = 72 +} + +variable "session_lifetime" { + type = number + description = "The session lifetime in hours." + default = 168 +} + +variable "sandbox_version" { + type = string + description = "The sandbox version." + default = "18" +} + +variable "enabled_locales" { + type = list(string) + description = "The enabled locales." + default = ["en"] +} + +variable "default_redirection_uri" { + type = string + description = "The default redirection URI." + default = "" +} + +variable "disable_clickjack_protection_headers" { + type = bool + description = "Whether to disable clickjack protection headers." + default = true +} + +variable "enable_public_signup_user_exists_error" { + type = bool + description = "Whether to enable public signup user exists error." + default = true +} + +variable "use_scope_descriptions_for_consent" { + type = bool + description = "Whether to use scope descriptions for consent." + default = false +} + +variable "no_disclose_enterprise_connections" { + type = bool + description = "Whether to disclose enterprise connections." + default = false +} + +variable "disable_management_api_sms_obfuscation" { + type = bool + description = "Whether to disable management API SMS obfuscation." + default = false +} + +variable "disable_fields_map_fix" { + type = bool + description = "Whether to disable fields map fix." + default = false +} + +variable "session_cookie_mode" { + type = string + description = "The session cookie mode." + default = "persistent" +} + +variable "oidc_logout_prompt_enabled" { + type = bool + description = "Whether the OIDC logout prompt is enabled." + default = false +} + +variable "email_provider_name" { + type = string + description = "The name of the email provider. If not defined, no email provider will be created." + default = "" +} + +variable "email_provider_default_from_address" { + type = string + description = "The default from address for the email provider." + default = "" +} + +variable "sendgrid_api_key_ssm_path" { + type = string + description = "The SSM path to the SendGrid API key. Only required if `email_provider_name` is `sendgrid`." + default = "" +} + +variable "auth0_prompt_experience" { + type = string + description = "Which prompt login experience to use. Options include classic and new." + default = "new" +} diff --git a/modules/auth0/tenant/versions.tf b/modules/auth0/tenant/versions.tf new file mode 100644 index 000000000..3894f08a9 --- /dev/null +++ b/modules/auth0/tenant/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + auth0 = { + source = "auth0/auth0" + version = ">= 1.0.0" + } + aws = { + source = "hashicorp/aws" + version = ">= 4.9.0" + } + } +} diff --git a/modules/aws-backup/README.md b/modules/aws-backup/README.md index 05695b5d1..3eed64d8c 100644 --- a/modules/aws-backup/README.md +++ b/modules/aws-backup/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/aws-backup + - layer/data + - provider/aws +--- + # Component: `aws-backup` This component is responsible for provisioning an AWS Backup Plan. It creates a schedule for backing up given ARNs. @@ -8,39 +15,152 @@ This component is responsible for provisioning an AWS Backup Plan. It creates a Here's an example snippet for how to use this component. 
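+A minimal, self-contained configuration could look like the following (a sketch only; the schedule, retention
+windows, and selection tag below are illustrative placeholders, not recommended values):
+
+```yaml
+components:
+  terraform:
+    aws-backup:
+      vars:
+        enabled: true
+        iam_role_enabled: true
+        vault_enabled: true
+        plan_enabled: true
+        plan_name_suffix: daily
+        # https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html
+        rules:
+          - name: "plan-daily"
+            schedule: "cron(0 0 * * ? *)" # daily at 12:00 AM UTC
+            start_window: 60 # minutes
+            completion_window: 240 # minutes
+            lifecycle:
+              delete_after: 14 # days
+        selection_tags:
+          - type: "STRINGEQUALS"
+            key: "aws-backup/resource_schedule"
+            value: "daily-14day-backup"
+```
+
+The sections below split these same settings into a shared "common" component (the vault and IAM role) and
+per-schedule plan components, so the vault and IAM role are defined once and reused by every plan.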
+### Component Abstraction and Separation + +By separating the "common" settings from the component, we can first provision the IAM Role and AWS Backup Vault to +prepare resources for future use without incurring cost. + +For example, `stacks/catalog/aws-backup/common`: + ```yaml +# This configuration creates the AWS Backup Vault and IAM Role, and does not incur any cost on its own. +# See: https://aws.amazon.com/backup/pricing/ components: terraform: aws-backup: + metadata: + type: abstract + settings: + spacelift: + workspace_enabled: true + vars: {} + + aws-backup/common: + metadata: + component: aws-backup + inherits: + - aws-backup vars: - # https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html - schedule: cron(0 0 * * ? *) # Daily At 12:00 AM UTC - start_window: 60 # Minutes - completion_window: 240 # Minutes - cold_storage_after: null # Days - delete_after: 14 # Days - destination_vault_arn: null # Copy to another Region's Vault - copy_action_cold_storage_after: null # Copy to another Region's Vault Cold Storage Config (Days) - copy_action_delete_after: null # Copy to another Region's Vault Persistence Config (Days) - backup_resources: [] - selection_tags: - - type: "STRINGEQUALS" - key: "aws-backup/resource_schedule" - value: "daily-14day-backup" + enabled: true + iam_role_enabled: true # this will be reused + vault_enabled: true # this will be reused + plan_enabled: false +## Please be careful when enabling backup_vault_lock_configuration, +# backup_vault_lock_configuration: +## `changeable_for_days` enables compliance mode and once the lock is set, the retention policy cannot be changed unless through account deletion! +# changeable_for_days: 36500 +# max_retention_days: 365 +# min_retention_days: 1 ``` -Since most of these values are shared and common, we can put them in a `catalog/aws-backup/` yaml file and share them across environments. +Then, if we would like to deploy the component into a given stack, we can import the following to deploy our backup +plans. + +Since most of these values are shared and common, we can put them in a `catalog/aws-backup/` yaml file and share them +across environments. This makes deploying the same configuration to multiple environments easy. -Deploying to a new stack (environment) then only requires: +`stacks/catalog/aws-backup/defaults`: + ```yaml import: - - catalog/aws-backup/aws-backup-nonprod + - catalog/aws-backup/common components: terraform: - aws-backup: {} + aws-backup/plan-defaults: + metadata: + component: aws-backup + type: abstract + settings: + spacelift: + workspace_enabled: true + depends_on: + - aws-backup/common + vars: + enabled: true + iam_role_enabled: false # reuse from aws-backup-vault + vault_enabled: false # reuse from aws-backup-vault + plan_enabled: true + plan_name_suffix: aws-backup-defaults + + aws-backup/daily-plan: + metadata: + component: aws-backup + inherits: + - aws-backup/plan-defaults + vars: + plan_name_suffix: aws-backup-daily + # https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html + rules: + - name: "plan-daily" + schedule: "cron(0 5 ?
* * *)" + start_window: 320 # 60 * 8 # minutes + completion_window: 10080 # 60 * 24 * 7 # minutes + lifecycle: + delete_after: 35 # 7 * 5 # days + selection_tags: + - type: STRINGEQUALS + key: aws-backup/efs + value: daily + - type: STRINGEQUALS + key: aws-backup/rds + value: daily + + aws-backup/weekly-plan: + metadata: + component: aws-backup + inherits: + - aws-backup/plan-defaults + vars: + plan_name_suffix: aws-backup-weekly + # https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html + rules: + - name: "plan-weekly" + schedule: "cron(0 5 ? * SAT *)" + start_window: 320 # 60 * 8 # minutes + completion_window: 10080 # 60 * 24 * 7 # minutes + lifecycle: + delete_after: 90 # 30 * 3 # days + selection_tags: + - type: STRINGEQUALS + key: aws-backup/efs + value: weekly + - type: STRINGEQUALS + key: aws-backup/rds + value: weekly + + aws-backup/monthly-plan: + metadata: + component: aws-backup + inherits: + - aws-backup/plan-defaults + vars: + plan_name_suffix: aws-backup-monthly + # https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html + rules: + - name: "plan-monthly" + schedule: "cron(0 5 1 * ? *)" + start_window: 320 # 60 * 8 # minutes + completion_window: 10080 # 60 * 24 * 7 # minutes + lifecycle: + delete_after: 2555 # 365 * 7 # days + cold_storage_after: 90 # 30 * 3 # days + selection_tags: + - type: STRINGEQUALS + key: aws-backup/efs + value: monthly + - type: STRINGEQUALS + key: aws-backup/rds + value: monthly +``` + +Deploying to a new stack (environment) then only requires: + +```yaml +import: + - catalog/aws-backup/defaults ``` The above configuration can be used to deploy a new backup to a new region. @@ -49,7 +169,8 @@ The above configuration can be used to deploy a new backup to a new region. ### Adding Resources to the Backup - Adding Tags -Once an `aws-backup` with a plan and `selection_tags` has been established we can begin adding resources for it to backup by using the tagging method. +Once an `aws-backup` with a plan and `selection_tags` has been established we can begin adding resources for it to +backup by using the tagging method. This only requires that we add tags to the resources we wish to backup, which can be done with the following snippet: @@ -64,11 +185,13 @@ components: Just ensure the tag key-value pair matches what was added to your backup plan and aws will take care of the rest. - ### Copying across regions -If we want to create a backup vault in another region that we can copy to, then we need to create another vault, and then specify that we want to copy to it. + +If we want to create a backup vault in another region that we can copy to, then we need to create another vault, and +then specify that we want to copy to it. 
To create a vault in a region simply: + ```yaml components: terraform: @@ -77,24 +200,76 @@ components: plan_enabled: false # disables the plan (which schedules resource backups) ``` -This will output an arn - which you can then use as the copy destination, as seen in the following snippet: +This will output an ARN - which you can then use as the destination in the rule object's `copy_action` (it will be +specific to that particular plan), as seen in the following snippet: + ```yaml components: terraform: - aws-backup: + aws-backup/plan-with-cross-region-replication: + metadata: + component: aws-backup + inherits: + - aws-backup/plan-defaults vars: - destination_vault_arn: arn:aws:backup::111111111111:backup-vault:-- - copy_action_delete_after: 14 + plan_name_suffix: aws-backup-cross-region + # https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html + rules: + - name: "plan-cross-region" + schedule: "cron(0 5 ? * * *)" + start_window: 320 # 60 * 8 # minutes + completion_window: 10080 # 60 * 24 * 7 # minutes + lifecycle: + delete_after: 35 # 7 * 5 # days + copy_action: + destination_vault_arn: "arn:aws:backup::111111111111:backup-vault:--" + lifecycle: + delete_after: 35 ``` +### Backup Lock Configuration + +To enable backup lock configuration, you can use the following snippet: + +- [AWS Backup Vault Lock](https://docs.aws.amazon.com/aws-backup/latest/devguide/vault-lock.html) + +#### Compliance Mode + +Vaults locked in compliance mode cannot be deleted once the cooling-off period ("grace time") expires. During grace +time, you can still remove the vault lock and change the lock configuration. + +To enable **Compliance Mode**, set `changeable_for_days` to a value greater than 0. Once the lock is set, the retention +policy cannot be changed unless through account deletion! +```yaml +# Please be careful when enabling backup_vault_lock_configuration, +backup_vault_lock_configuration: + # `changeable_for_days` enables compliance mode and once the lock is set, the retention policy cannot be changed unless through account deletion! + changeable_for_days: 36500 + max_retention_days: 365 + min_retention_days: 1 +``` + +#### Governance Mode + +Vaults locked in governance mode can have the lock removed by users with sufficient IAM permissions. + +To enable **governance mode** + +```yaml +backup_vault_lock_configuration: + max_retention_days: 365 + min_retention_days: 1 +``` + + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.0 | -| [aws](#requirement\_aws) | >= 2.0 | +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | ## Providers @@ -104,9 +279,9 @@ No providers. | Name | Source | Version | |------|--------|---------| -| [backup](#module\_backup) | cloudposse/backup/aws | 0.8.1 | +| [backup](#module\_backup) | cloudposse/backup/aws | 1.0.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [this](#module\_this) | cloudposse/label/null | 0.24.1 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources @@ -116,39 +291,34 @@ No resources. | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional tags for appending to tags\_as\_list\_of\_maps. Not added to `tags`. | `map(string)` | `{}` | no | -| [attributes](#input\_attributes) | Additional attributes (e.g. 
`1`) | `list(string)` | `[]` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [advanced\_backup\_setting](#input\_advanced\_backup\_setting) | An object that specifies backup options for each resource type. |
object({
backup_options = string
resource_type = string
})
| `null` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | | [backup\_resources](#input\_backup\_resources) | An array of strings that either contain Amazon Resource Names (ARNs) or match patterns of resources to assign to a backup plan | `list(string)` | `[]` | no | -| [cold\_storage\_after](#input\_cold\_storage\_after) | Specifies the number of days after creation that a recovery point is moved to cold storage | `number` | `null` | no | -| [completion\_window](#input\_completion\_window) | The amount of time AWS Backup attempts a backup before canceling the job and returning an error. Must be at least 60 minutes greater than `start_window` | `number` | `null` | no | -| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {}
}
| no | -| [copy\_action\_cold\_storage\_after](#input\_copy\_action\_cold\_storage\_after) | For copy operation, specifies the number of days after creation that a recovery point is moved to cold storage | `number` | `null` | no | -| [copy\_action\_delete\_after](#input\_copy\_action\_delete\_after) | For copy operation, specifies the number of days after creation that a recovery point is deleted. Must be 90 days greater than `copy_action_cold_storage_after` | `number` | `null` | no | -| [delete\_after](#input\_delete\_after) | Specifies the number of days after creation that a recovery point is deleted. Must be 90 days greater than `cold_storage_after` | `number` | `null` | no | -| [delimiter](#input\_delimiter) | Delimiter to be used between `namespace`, `environment`, `stage`, `name` and `attributes`.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | -| [destination\_vault\_arn](#input\_destination\_vault\_arn) | An Amazon Resource Name (ARN) that uniquely identifies the destination backup vault for the copied backup | `string` | `null` | no | +| [backup\_vault\_lock\_configuration](#input\_backup\_vault\_lock\_configuration) | The backup vault lock configuration; each vault can have one vault lock in place. This enables AWS Backup Vault Lock on the vault, which prevents the deletion of backup data for the specified retention period. During this time, the backup data remains immutable and cannot be deleted or modified.
`changeable_for_days` - The number of days before the lock date. If omitted, this creates a vault lock in `governance` mode; otherwise it creates a vault lock in `compliance` mode. |
object({
changeable_for_days = optional(number)
max_retention_days = optional(number)
min_retention_days = optional(number)
})
| `null` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | -| [environment](#input\_environment) | Environment, e.g. 'uw2', 'us-west-2', OR 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [iam\_role\_enabled](#input\_iam\_role\_enabled) | Whether or not to create a new IAM Role and Policy Attachment | `bool` | `true` | no | -| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for default, which is `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | | [kms\_key\_arn](#input\_kms\_key\_arn) | The server-side encryption key that is used to protect your backups | `string` | `null` | no | -| [label\_key\_case](#input\_label\_key\_case) | The letter case of label keys (`tag` names) (i.e. `name`, `namespace`, `environment`, `stage`, `attributes`) to use in `tags`.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | -| [label\_order](#input\_label\_order) | The naming order of the id output and Name tag.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 5 elements, but at least one must be present. | `list(string)` | `null` | no | -| [label\_value\_case](#input\_label\_value\_case) | The letter case of output label values (also used in `tags` and `id`).
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Default value: `lower`. | `string` | `null` | no | -| [name](#input\_name) | Solution name, e.g. 'app' or 'jenkins' | `string` | `null` | no | -| [namespace](#input\_namespace) | Namespace, which could be your organization name or abbreviation, e.g. 'eg' or 'cp' | `string` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | | [plan\_enabled](#input\_plan\_enabled) | Whether or not to create a new Plan | `bool` | `true` | no | | [plan\_name\_suffix](#input\_plan\_name\_suffix) | The string appended to the plan name | `string` | `null` | no | -| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Regex to replace chars with empty string in `namespace`, `environment`, `stage` and `name`.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | -| [schedule](#input\_schedule) | A CRON expression specifying when AWS Backup initiates a backup job | `string` | `null` | no | +| [rules](#input\_rules) | An array of rule maps used to define schedules in a backup plan |
list(object({
name = string
schedule = optional(string)
enable_continuous_backup = optional(bool)
start_window = optional(number)
completion_window = optional(number)
lifecycle = optional(object({
cold_storage_after = optional(number)
delete_after = optional(number)
opt_in_to_archive_for_supported_resources = optional(bool)
}))
copy_action = optional(object({
destination_vault_arn = optional(string)
lifecycle = optional(object({
cold_storage_after = optional(number)
delete_after = optional(number)
opt_in_to_archive_for_supported_resources = optional(bool)
}))
}))
}))
| `[]` | no | | [selection\_tags](#input\_selection\_tags) | An array of tag condition objects used to filter resources based on tags for assigning to a backup plan | `list(map(string))` | `[]` | no | -| [stage](#input\_stage) | Stage, e.g. 'prod', 'staging', 'dev', OR 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | -| [start\_window](#input\_start\_window) | The amount of time in minutes before beginning a backup. Minimum value is 60 minutes | `number` | `null` | no | -| [tags](#input\_tags) | Additional tags (e.g. `map('BusinessUnit','XYZ')` | `map(string)` | `{}` | no | -| [target\_iam\_role\_name](#input\_target\_iam\_role\_name) | Override target IAM Name | `string` | `null` | no | -| [target\_vault\_name](#input\_target\_vault\_name) | Override target Vault Name | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | | [vault\_enabled](#input\_vault\_enabled) | Whether or not a new Vault should be created | `bool` | `true` | no | ## Outputs @@ -160,12 +330,16 @@ No resources. | [backup\_selection\_id](#output\_backup\_selection\_id) | Backup Selection ID | | [backup\_vault\_arn](#output\_backup\_vault\_arn) | Backup Vault ARN | | [backup\_vault\_id](#output\_backup\_vault\_id) | Backup Vault ID | -| [backup\_vault\_recovery\_points](#output\_backup\_vault\_recovery\_points) | Backup Vault recovery points | - + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/aws-backup) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/aws-backup) - + Cloud Posse's upstream component [](https://cpco.io/component) + +## Related How-to Guides + +- [How to Enable Cross-Region Backups in AWS-Backup](https://docs.cloudposse.com/layers/data/tutorials/how-to-enable-cross-region-backups-in-aws-backup/) diff --git a/modules/aws-backup/context.tf b/modules/aws-backup/context.tf index 81f99b4e3..5e0ef8856 100644 --- a/modules/aws-backup/context.tf +++ b/modules/aws-backup/context.tf @@ -8,6 +8,8 @@ # Cloud Posse's standard configuration inputs suitable for passing # to Cloud Posse modules. # +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# # Modules should access the whole context as `module.this.context` # to get the input variables with nulls for defaults, # for example `context = module.this.context`, @@ -20,10 +22,11 @@ module "this" { source = "cloudposse/label/null" - version = "0.24.1" # requires Terraform >= 0.13.0 + version = "0.25.0" # requires Terraform >= 0.13.0 enabled = var.enabled namespace = var.namespace + tenant = var.tenant environment = var.environment stage = var.stage name = var.name @@ -36,6 +39,8 @@ module "this" { id_length_limit = var.id_length_limit label_key_case = var.label_key_case label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags context = var.context } @@ -47,6 +52,7 @@ variable "context" { default = { enabled = true namespace = null + tenant = null environment = null stage = null name = null @@ -59,6 +65,15 @@ variable "context" { id_length_limit = null label_key_case = null label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] } description = <<-EOT Single object for setting entire context at once. @@ -88,32 +103,42 @@ variable "enabled" { variable "namespace" { type = string default = null - description = "Namespace, which could be your organization name or abbreviation, e.g. 'eg' or 'cp'" + description = "ID element. Usually an abbreviation of your organization name, e.g. 
'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" } variable "environment" { type = string default = null - description = "Environment, e.g. 'uw2', 'us-west-2', OR 'prod', 'staging', 'dev', 'UAT'" + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" } variable "stage" { type = string default = null - description = "Stage, e.g. 'prod', 'staging', 'dev', OR 'source', 'build', 'test', 'deploy', 'release'" + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" } variable "name" { type = string default = null - description = "Solution name, e.g. 'app' or 'jenkins'" + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT } variable "delimiter" { type = string default = null description = <<-EOT - Delimiter to be used between `namespace`, `environment`, `stage`, `name` and `attributes`. + Delimiter to be used between ID elements. Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. EOT } @@ -121,36 +146,64 @@ variable "delimiter" { variable "attributes" { type = list(string) default = [] - description = "Additional attributes (e.g. `1`)" + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT } variable "tags" { type = map(string) default = {} - description = "Additional tags (e.g. `map('BusinessUnit','XYZ')`" + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT } variable "additional_tag_map" { type = map(string) default = {} - description = "Additional tags for appending to tags_as_list_of_maps. Not added to `tags`." + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT } variable "label_order" { type = list(string) default = null description = <<-EOT - The naming order of the id output and Name tag. + The order in which the labels (ID elements) appear in the `id`. Defaults to ["namespace", "environment", "stage", "name", "attributes"]. 
- You can omit any of the 5 elements, but at least one must be present. - EOT + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT } variable "regex_replace_chars" { type = string default = null description = <<-EOT - Regex to replace chars with empty string in `namespace`, `environment`, `stage` and `name`. + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. EOT } @@ -161,7 +214,7 @@ variable "id_length_limit" { description = <<-EOT Limit `id` to this many characters (minimum 6). Set to `0` for unlimited length. - Set to `null` for default, which is `0`. + Set to `null` for keep the existing setting, which defaults to `0`. Does not affect `id_full`. EOT validation { @@ -174,7 +227,8 @@ variable "label_key_case" { type = string default = null description = <<-EOT - The letter case of label keys (`tag` names) (i.e. `name`, `namespace`, `environment`, `stage`, `attributes`) to use in `tags`. + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. Possible values: `lower`, `title`, `upper`. Default value: `title`. EOT @@ -189,8 +243,11 @@ variable "label_value_case" { type = string default = null description = <<-EOT - The letter case of output label values (also used in `tags` and `id`). + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. Default value: `lower`. EOT @@ -199,4 +256,24 @@ variable "label_value_case" { error_message = "Allowed values: `lower`, `title`, `upper`, `none`." } } + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + #### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/aws-backup/default.auto.tfvars b/modules/aws-backup/default.auto.tfvars deleted file mode 100644 index 47f94fb9b..000000000 --- a/modules/aws-backup/default.auto.tfvars +++ /dev/null @@ -1 +0,0 @@ -enabled = false diff --git a/modules/aws-backup/main.tf b/modules/aws-backup/main.tf index 30a49558c..3a64342fd 100644 --- a/modules/aws-backup/main.tf +++ b/modules/aws-backup/main.tf @@ -1,8 +1,6 @@ module "backup" { source = "cloudposse/backup/aws" - version = "0.8.1" - - context = module.this.context + version = "1.0.0" plan_name_suffix = var.plan_name_suffix vault_enabled = var.vault_enabled @@ -12,19 +10,11 @@ module "backup" { backup_resources = var.backup_resources selection_tags = var.selection_tags - schedule = var.schedule - start_window = var.start_window - completion_window = var.completion_window - cold_storage_after = var.cold_storage_after - delete_after = var.delete_after - kms_key_arn = var.kms_key_arn - target_iam_role_name = var.target_iam_role_name + kms_key_arn = var.kms_key_arn - # Copy config to new region - destination_vault_arn = var.destination_vault_arn - copy_action_cold_storage_after = var.copy_action_cold_storage_after - copy_action_delete_after = var.copy_action_delete_after + rules = var.rules + advanced_backup_setting = var.advanced_backup_setting + backup_vault_lock_configuration = var.backup_vault_lock_configuration - target_vault_name = var.target_vault_name + context = module.this.context } - diff --git a/modules/aws-backup/outputs.tf b/modules/aws-backup/outputs.tf index dfaae3046..69fa779e5 100644 --- a/modules/aws-backup/outputs.tf +++ b/modules/aws-backup/outputs.tf @@ -8,11 +8,6 @@ output "backup_vault_arn" { description = "Backup Vault ARN" } -output "backup_vault_recovery_points" { - value = module.backup.backup_vault_recovery_points - description = "Backup Vault recovery points" -} - output "backup_plan_arn" { value = module.backup.backup_plan_arn description = "Backup Plan ARN" @@ -27,4 +22,3 @@ output "backup_selection_id" { value = module.backup.backup_selection_id description = "Backup Selection ID" } - diff --git a/modules/aws-backup/providers.tf b/modules/aws-backup/providers.tf old mode 100755 new mode 100644 index 74cd8f825..ef923e10a --- a/modules/aws-backup/providers.tf +++ b/modules/aws-backup/providers.tf @@ -1,16 +1,19 @@ provider "aws" { region = var.region - profile = coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } } module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} diff --git a/modules/aws-backup/variables.tf b/modules/aws-backup/variables.tf index 359fc1848..1c5755152 100644 --- a/modules/aws-backup/variables.tf +++ b/modules/aws-backup/variables.tf @@ -9,51 +9,16 @@ variable "kms_key_arn" { default = null } -variable "schedule" { - type = string - description = "A CRON expression specifying when AWS Backup initiates a backup job" - default = null -} - -variable "start_window" { - type = number - description = "The amount of time in minutes before beginning a backup. Minimum value is 60 minutes" - default = null -} - -variable "completion_window" { - type = number - description = "The amount of time AWS Backup attempts a backup before canceling the job and returning an error. Must be at least 60 minutes greater than `start_window`" - default = null -} - -variable "cold_storage_after" { - type = number - description = "Specifies the number of days after creation that a recovery point is moved to cold storage" - default = null -} - -variable "delete_after" { - type = number - description = "Specifies the number of days after creation that a recovery point is deleted. Must be 90 days greater than `cold_storage_after`" - default = null -} - -variable "destination_vault_arn" { - type = string - description = "An Amazon Resource Name (ARN) that uniquely identifies the destination backup vault for the copied backup" - default = null -} - -variable "copy_action_cold_storage_after" { - type = number - description = "For copy operation, specifies the number of days after creation that a recovery point is moved to cold storage" - default = null -} - -variable "copy_action_delete_after" { - type = number - description = "For copy operation, specifies the number of days after creation that a recovery point is deleted. Must be 90 days greater than `copy_action_cold_storage_after`" +variable "backup_vault_lock_configuration" { + type = object({ + changeable_for_days = optional(number) + max_retention_days = optional(number) + min_retention_days = optional(number) + }) + description = <<-EOT + The backup vault lock configuration, each vault can have one vault lock in place. This will enable Backup Vault Lock on an AWS Backup vault it prevents the deletion of backup data for the specified retention period. During this time, the backup data remains immutable and cannot be deleted or modified." + `changeable_for_days` - The number of days before the lock date. If omitted creates a vault lock in `governance` mode, otherwise it will create a vault lock in `compliance` mode. 
+ EOT default = null } @@ -75,12 +40,6 @@ variable "plan_name_suffix" { default = null } -variable "target_vault_name" { - type = string - description = "Override target Vault Name" - default = null -} - variable "vault_enabled" { type = bool description = "Whether or not a new Vault should be created" @@ -99,8 +58,37 @@ variable "iam_role_enabled" { default = true } -variable "target_iam_role_name" { - type = string - description = "Override target IAM Name" + +variable "rules" { + type = list(object({ + name = string + schedule = optional(string) + enable_continuous_backup = optional(bool) + start_window = optional(number) + completion_window = optional(number) + lifecycle = optional(object({ + cold_storage_after = optional(number) + delete_after = optional(number) + opt_in_to_archive_for_supported_resources = optional(bool) + })) + copy_action = optional(object({ + destination_vault_arn = optional(string) + lifecycle = optional(object({ + cold_storage_after = optional(number) + delete_after = optional(number) + opt_in_to_archive_for_supported_resources = optional(bool) + })) + })) + })) + description = "An array of rule maps used to define schedules in a backup plan" + default = [] +} + +variable "advanced_backup_setting" { + type = object({ + backup_options = string + resource_type = string + }) + description = "An object that specifies backup options for each resource type." default = null } diff --git a/modules/aws-backup/versions.tf b/modules/aws-backup/versions.tf index 5b2c49b90..b5920b7b1 100644 --- a/modules/aws-backup/versions.tf +++ b/modules/aws-backup/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.0" + required_version = ">= 1.3.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 2.0" + version = ">= 4.9.0" } } } diff --git a/modules/aws-config/README.md b/modules/aws-config/README.md new file mode 100644 index 000000000..355ebad34 --- /dev/null +++ b/modules/aws-config/README.md @@ -0,0 +1,267 @@ +--- +tags: + - component/aws-config + - layer/security-and-compliance + - provider/aws +--- + +# Component: `aws-config` + +This component is responsible for configuring AWS Config. + +AWS Config service enables you to track changes to your AWS resources over time. It continuously monitors and records +configuration changes to your AWS resources and provides you with a detailed view of the relationships between those +resources. With AWS Config, you can assess, audit, and evaluate the configurations of your AWS resources for compliance, +security, and governance purposes. + +Some of the key features of AWS Config include: + +- Configuration history: AWS Config maintains a detailed history of changes to your AWS resources, allowing you to see + when changes were made, who made them, and what the changes were. +- Configuration snapshots: AWS Config can take periodic snapshots of your AWS resources configurations, giving you a + point-in-time view of their configuration. +- Compliance monitoring: AWS Config provides a range of pre-built rules and checks to monitor your resources for + compliance with best practices and industry standards. +- Relationship mapping: AWS Config can map the relationships between your AWS resources, enabling you to see how changes + to one resource can impact others. +- Notifications and alerts: AWS Config can send notifications and alerts when changes are made to your AWS resources + that could impact their compliance or security posture. 
+ +> [!WARNING] +> +> #### AWS Config Limitations +> +> You'll also want to be aware of some limitations with AWS Config: +> +> - The maximum number of AWS Config rules that can be evaluated in a single account is 1000. +> - This can be mitigated by removing rules that are duplicated across packs. You'll have to manually search for these +> duplicates. +> - You can also look for rules that do not apply to any resources and remove those. You'll have to manually click +> through rules in the AWS Config interface to see which rules are not being evaluated. +> - If you end up still needing more than 1000 rules, one recommendation is to only run packs on a schedule with a +> lambda that removes the pack after results are collected. If you had different schedule for each day of the week, +> that would mean 7000 rules over the week. The aggregators would not be able to handle this, so you would need to +> make sure to store them somewhere else (i.e. S3) so the findings are not lost. +> - See the +> [Audit Manager docs](https://aws.amazon.com/blogs/mt/integrate-across-the-three-lines-model-part-2-transform-aws-config-conformance-packs-into-aws-audit-manager-assessments/) +> if you think you would like to convert conformance packs to custom Audit Manager assessments. +> - The maximum number of AWS Config conformance packs that can be created in a single account is 50. + +Overall, AWS Config provides you with a powerful toolset to help you monitor and manage the configurations of your AWS +resources, ensuring that they remain compliant, secure, and properly configured over time. + +## Prerequisites + +As part of +[CIS AWS Foundations 1.20](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-cis-controls.html#securityhub-cis-controls-1.20), +this component assumes that a designated support IAM role with the following permissions has been deployed to every +account in the organization: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowSupport", + "Effect": "Allow", + "Action": ["support:*"], + "Resource": "*" + }, + { + "Sid": "AllowTrustedAdvisor", + "Effect": "Allow", + "Action": "trustedadvisor:Describe*", + "Resource": "*" + } + ] +} +``` + +Before deploying this AWS Config component `config-bucket` and `cloudtrail-bucket` should be deployed first. + +## Usage + +**Stack Level**: Regional or Global + +This component has a `default_scope` variable for configuring if it will be an organization-wide or account-level +component by default. Note that this can be overridden by the `scope` variable in the `conformance_packs` items. + +> [!TIP] +> +> #### Using the account default_scope +> +> If default_scope == `account`, AWS Config is regional AWS service, so this component needs to be deployed to all +> regions. If an individual `conformance_packs` item has `scope` set to `organization`, that particular pack will be +> deployed to the organization level. + +> [!TIP] +> +> #### Using the organization default_scope +> +> If default_scope == `organization`, AWS Config is global unless overridden in the `conformance_packs` items. You will +> need to update your org to allow the `config-multiaccountsetup.amazonaws.com` service access principal for this to +> work. If you are using our `account` component, just add that principal to the `aws_service_access_principals` +> variable. 
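For reference, a minimal Atmos stack sketch (assuming the Cloud Posse `account` component; the org-conformance-pack README below shows the same pattern) that allows the required service access principal:

```yaml
components:
  terraform:
    account:
      vars:
        aws_service_access_principals:
          - config-multiaccountsetup.amazonaws.com
```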
+ +At the AWS Organizational level, the Components designate an account to be the `central collection account` and a single +region to be the `central collection region` so that compliance information can be aggregated into a central location. + +Logs are typically written to the `audit` account and AWS Config deployed into to the `security` account. + +Here's an example snippet for how to use this component: + +```yaml +components: + terraform: + aws-config: + vars: + enabled: true + account_map_tenant: core + az_abbreviation_type: fixed + # In each AWS account, an IAM role should be created in the main region. + # If the main region is set to us-east-1, the value of the var.create_iam_role variable should be true. + # For all other regions, the value of var.create_iam_role should be false. + create_iam_role: false + central_resource_collector_account: core-security + global_resource_collector_region: us-east-1 + config_bucket_env: ue1 + config_bucket_stage: audit + config_bucket_tenant: core + conformance_packs: + - name: Operational-Best-Practices-for-CIS-AWS-v1.4-Level2 + conformance_pack: https://raw.githubusercontent.com/awslabs/aws-config-rules/master/aws-config-conformance-packs/Operational-Best-Practices-for-CIS-AWS-v1.4-Level2.yaml + parameter_overrides: + AccessKeysRotatedParamMaxAccessKeyAge: '45' + - name: Operational-Best-Practices-for-HIPAA-Security.yaml + conformance_pack: https://raw.githubusercontent.com/awslabs/aws-config-rules/master/aws-config-conformance-packs/Operational-Best-Practices-for-HIPAA-Security.yaml + parameter_overrides: + ... + (etc) + managed_rules: + access-keys-rotated: + identifier: ACCESS_KEYS_ROTATED + description: "Checks whether the active access keys are rotated within the number of days specified in maxAccessKeyAge. The rule is NON_COMPLIANT if the access keys have not been rotated for more than maxAccessKeyAge number of days." + input_parameters: + maxAccessKeyAge: "30" + enabled: true + tags: { } +``` + +## Deployment + +Apply to your central region security account + +```sh +atmos terraform plan aws-config-{central-region} --stack core-{central-region}-security -var=create_iam_role=true +``` + +For example when central region is `us-east-1`: + +```sh +atmos terraform plan aws-config-ue1 --stack core-ue1-security -var=create_iam_role=true +``` + +Apply aws-config to all stacks in all stages. 
+ +```sh +atmos terraform plan aws-config-{each region} --stack {each region}-{each stage} +``` + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [awsutils](#requirement\_awsutils) | >= 0.16.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [aws\_config](#module\_aws\_config) | cloudposse/config/aws | 1.1.0 | +| [aws\_config\_label](#module\_aws\_config\_label) | cloudposse/label/null | 0.25.0 | +| [aws\_team\_roles](#module\_aws\_team\_roles) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [config\_bucket](#module\_config\_bucket) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [conformance\_pack](#module\_conformance\_pack) | cloudposse/config/aws//modules/conformance-pack | 1.1.0 | +| [global\_collector\_region](#module\_global\_collector\_region) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [org\_conformance\_pack](#module\_org\_conformance\_pack) | ./modules/org-conformance-pack | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [utils](#module\_utils) | cloudposse/utils/aws | 1.3.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_caller_identity.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | +| [aws_partition.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | +| [aws_region.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [account\_map\_tenant](#input\_account\_map\_tenant) | (Optional) The tenant where the account\_map component required by remote-state is deployed. | `string` | `""` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [az\_abbreviation\_type](#input\_az\_abbreviation\_type) | AZ abbreviation type, `fixed` or `short` | `string` | `"fixed"` | no | +| [central\_resource\_collector\_account](#input\_central\_resource\_collector\_account) | The name of the account that is the centralized aggregation account. | `string` | n/a | yes | +| [config\_bucket\_env](#input\_config\_bucket\_env) | The environment of the AWS Config S3 Bucket | `string` | n/a | yes | +| [config\_bucket\_stage](#input\_config\_bucket\_stage) | The stage of the AWS Config S3 Bucket | `string` | n/a | yes | +| [config\_bucket\_tenant](#input\_config\_bucket\_tenant) | (Optional) The tenant of the AWS Config S3 Bucket | `string` | `""` | no | +| [conformance\_packs](#input\_conformance\_packs) | List of conformance packs. Each conformance pack is a map with the following keys: name, conformance\_pack, parameter\_overrides.

For example:
conformance\_packs = [
{
name = "Operational-Best-Practices-for-CIS-AWS-v1.4-Level1"
conformance\_pack = "https://raw.githubusercontent.com/awslabs/aws-config-rules/master/aws-config-conformance-packs/Operational-Best-Practices-for-CIS-AWS-v1.4-Level1.yaml"
parameter\_overrides = {
"AccessKeysRotatedParamMaxAccessKeyAge" = "45"
}
},
{
name = "Operational-Best-Practices-for-CIS-AWS-v1.4-Level2"
conformance\_pack = "https://raw.githubusercontent.com/awslabs/aws-config-rules/master/aws-config-conformance-packs/Operational-Best-Practices-for-CIS-AWS-v1.4-Level2.yaml"
parameter\_overrides = {
"IamPasswordPolicyParamMaxPasswordAge" = "45"
}
}
]

Complete list of AWS Conformance Packs managed by AWSLabs can be found here:
https://github.com/awslabs/aws-config-rules/tree/master/aws-config-conformance-packs |
list(object({
name = string
conformance_pack = string
parameter_overrides = map(string)
scope = optional(string, null)
}))
| `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [create\_iam\_role](#input\_create\_iam\_role) | Flag to indicate whether an IAM Role should be created to grant the proper permissions for AWS Config | `bool` | `false` | no | +| [default\_scope](#input\_default\_scope) | The default scope of the conformance pack. Valid values are `account` and `organization`. | `string` | `"account"` | no | +| [delegated\_accounts](#input\_delegated\_accounts) | The account IDs of other accounts that will send their AWS Configuration or Security Hub data to this account | `set(string)` | `null` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [global\_environment](#input\_global\_environment) | Global environment name | `string` | `"gbl"` | no | +| [global\_resource\_collector\_region](#input\_global\_resource\_collector\_region) | The region that collects AWS Config data for global resources such as IAM | `string` | n/a | yes | +| [iam\_role\_arn](#input\_iam\_role\_arn) | The ARN for an IAM Role AWS Config uses to make read or write requests to the delivery channel and to describe the
AWS resources associated with the account. This is only used if create\_iam\_role is false.

If you want to use an existing IAM Role, set the variable to the ARN of the existing role and set create\_iam\_role to `false`.

See the AWS Docs for further information:
http://docs.aws.amazon.com/config/latest/developerguide/iamrole-permissions.html | `string` | `null` | no | +| [iam\_roles\_environment\_name](#input\_iam\_roles\_environment\_name) | The name of the environment where the IAM roles are provisioned | `string` | `"gbl"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.<br>
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [managed\_rules](#input\_managed\_rules) | A list of AWS Managed Rules that should be enabled on the account.

See the following for a list of possible rules to enable:
https://docs.aws.amazon.com/config/latest/developerguide/managed-rules-by-aws-config.html

Example:
managed_rules = {
access-keys-rotated = {
identifier = "ACCESS_KEYS_ROTATED"
description = "Checks whether the active access keys are rotated within the number of days specified in maxAccessKeyAge. The rule is NON_COMPLIANT if the access keys have not been rotated for more than maxAccessKeyAge number of days."
input_parameters = {
maxAccessKeyAge : "90"
}
enabled = true
tags = {}
}
}
|
map(object({
description = string
identifier = string
input_parameters = any
tags = map(string)
enabled = bool
}))
| `{}` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [privileged](#input\_privileged) | True if the default provider already has access to the backend | `bool` | `false` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [root\_account\_stage](#input\_root\_account\_stage) | The stage name for the Organization root (master) account | `string` | `"root"` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [aws\_config\_configuration\_recorder\_id](#output\_aws\_config\_configuration\_recorder\_id) | The ID of the AWS Config Recorder | +| [aws\_config\_iam\_role](#output\_aws\_config\_iam\_role) | The ARN of the IAM Role used for AWS Config | +| [storage\_bucket\_arn](#output\_storage\_bucket\_arn) | Storage Config bucket ARN | +| [storage\_bucket\_id](#output\_storage\_bucket\_id) | Storage Config bucket ID | + + +## References + +- [AWS Config Documentation](https://docs.aws.amazon.com/config/index.html) +- [Cloud Posse's upstream component](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/aws-config) +- [Conformance Packs documentation](https://docs.aws.amazon.com/config/latest/developerguide/conformance-packs.html) +- [AWS Managed Sample Conformance Packs](https://github.com/awslabs/aws-config-rules/tree/master/aws-config-conformance-packs) + +[](https://cpco.io/component) diff --git a/modules/aws-config/context.tf b/modules/aws-config/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/aws-config/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/aws-config/main.tf b/modules/aws-config/main.tf new file mode 100644 index 000000000..b36912ada --- /dev/null +++ b/modules/aws-config/main.tf @@ -0,0 +1,98 @@ +locals { + enabled = module.this.enabled + account_map = module.account_map.outputs.full_account_map + s3_bucket = module.config_bucket.outputs + is_global_collector_region = join("", data.aws_region.this[*].name) == var.global_resource_collector_region + create_iam_role = var.create_iam_role && local.is_global_collector_region + config_iam_role_template = "arn:${local.partition}:iam::${join("", data.aws_caller_identity.this[*].account_id)}:role/${module.aws_config_label.id}" + config_iam_role_from_state = local.create_iam_role ? null : join("", module.global_collector_region[*].outputs.aws_config_iam_role) + config_iam_role_external = var.iam_role_arn != null ? var.iam_role_arn : local.config_iam_role_from_state + config_iam_role_arn = local.create_iam_role ? local.config_iam_role_template : local.config_iam_role_external + central_resource_collector_account = local.account_map[var.central_resource_collector_account] + delegated_accounts = var.delegated_accounts != null ? var.delegated_accounts : toset(values(local.account_map)) + partition = join("", data.aws_partition.this[*].partition) +} + +data "aws_caller_identity" "this" { + count = local.enabled ? 1 : 0 +} + +data "aws_region" "this" { + count = local.enabled ? 1 : 0 +} + +data "aws_partition" "this" { + count = local.enabled ? 
1 : 0 +} + +module "aws_config_label" { + source = "cloudposse/label/null" + version = "0.25.0" + attributes = ["config"] + + context = module.this.context +} + +module "utils" { + source = "cloudposse/utils/aws" + version = "1.3.0" + + context = module.this.context +} + +locals { + packs = [for pack in var.conformance_packs : merge(pack, { scope = coalesce(pack.scope, var.default_scope) })] + account_packs = { for pack in local.packs : pack.name => pack if pack.scope == "account" } + org_packs = { for pack in local.packs : pack.name => pack if pack.scope == "organization" } +} + +module "conformance_pack" { + source = "cloudposse/config/aws//modules/conformance-pack" + version = "1.1.0" + + for_each = local.enabled ? local.account_packs : {} + + name = each.key + conformance_pack = each.value.conformance_pack + parameter_overrides = each.value.parameter_overrides + + depends_on = [ + module.aws_config + ] + + context = module.this.context +} + +module "org_conformance_pack" { + source = "./modules/org-conformance-pack" + + for_each = local.enabled ? local.org_packs : {} + + name = each.key + conformance_pack = each.value.conformance_pack + parameter_overrides = each.value.parameter_overrides + + depends_on = [ + module.aws_config + ] + + context = module.this.context +} + +module "aws_config" { + source = "cloudposse/config/aws" + version = "1.1.0" + + s3_bucket_id = local.s3_bucket.config_bucket_id + s3_bucket_arn = local.s3_bucket.config_bucket_arn + create_iam_role = local.create_iam_role + iam_role_arn = local.config_iam_role_arn + managed_rules = var.managed_rules + create_sns_topic = true + + global_resource_collector_region = var.global_resource_collector_region + central_resource_collector_account = local.central_resource_collector_account + child_resource_collector_accounts = local.delegated_accounts + + context = module.this.context +} diff --git a/modules/aws-config/modules/org-conformance-pack/README.md b/modules/aws-config/modules/org-conformance-pack/README.md new file mode 100644 index 000000000..5e196d5ff --- /dev/null +++ b/modules/aws-config/modules/org-conformance-pack/README.md @@ -0,0 +1,48 @@ +# AWS Config Conformance Pack + +This module deploys a +[Conformance Pack](https://docs.aws.amazon.com/config/latest/developerguide/conformance-packs.html). A conformance pack +is a collection of AWS Config rules and remediation actions that can be easily deployed as a single entity in an account +and a Region or across an organization in AWS Organizations. Conformance packs are created by authoring a YAML template +that contains the list of AWS Config managed or custom rules and remediation actions. + +The Conformance Pack cannot be deployed until AWS Config is deployed, which can be deployed using the +[aws-config](../../) component. + +## Usage + +First, make sure your root `account` allows the service access principal `config-multiaccountsetup.amazonaws.com` to +update child organizations. You can see the docs on the account module here: +[aws_service_access_principals](https://docs.cloudposse.com/components/library/aws/account/#input_aws_service_access_principals) + +Then you have two options: + +- Set the `default_scope` of the parent `aws-config` component to be `organization` (can be overridden by the `scope` of + each `conformance_packs` item) +- Set the `scope` of the `conformance_packs` item to be `organization` + +An example YAML stack config for Atmos follows. Note, that both options are shown for demonstration purposes. 
In +practice you should only have one `aws-config` per account: + +```yaml +components: + terraform: + account: + vars: + aws_service_access_principals: + - config-multiaccountsetup.amazonaws.com + + aws-config/cis/level-1: + vars: + conformance_packs: + - name: Operational-Best-Practices-for-CIS-AWS-v1.4-Level1 + conformance_pack: https://raw.githubusercontent.com/awslabs/aws-config-rules/master/aws-config-conformance-packs/Operational-Best-Practices-for-CIS-AWS-v1.4-Level1.yaml + scope: organization + + aws-config/cis/level-2: + vars: + default_scope: organization + conformance_packs: + - name: Operational-Best-Practices-for-CIS-AWS-v1.4-Level2 + conformance_pack: https://raw.githubusercontent.com/awslabs/aws-config-rules/master/aws-config-conformance-packs/Operational-Best-Practices-for-CIS-AWS-v1.4-Level2.yaml +``` diff --git a/modules/aws-config/modules/org-conformance-pack/context.tf b/modules/aws-config/modules/org-conformance-pack/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/aws-config/modules/org-conformance-pack/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/aws-config/modules/org-conformance-pack/main.tf b/modules/aws-config/modules/org-conformance-pack/main.tf new file mode 100644 index 000000000..b2c021536 --- /dev/null +++ b/modules/aws-config/modules/org-conformance-pack/main.tf @@ -0,0 +1,17 @@ +resource "aws_config_organization_conformance_pack" "default" { + name = module.this.name + + dynamic "input_parameter" { + for_each = var.parameter_overrides + content { + parameter_name = input_parameter.key + parameter_value = input_parameter.value + } + } + + template_body = data.http.conformance_pack.body +} + +data "http" "conformance_pack" { + url = var.conformance_pack +} diff --git a/modules/aws-config/modules/org-conformance-pack/outputs.tf b/modules/aws-config/modules/org-conformance-pack/outputs.tf new file mode 100644 index 000000000..f3b7cef11 --- /dev/null +++ b/modules/aws-config/modules/org-conformance-pack/outputs.tf @@ -0,0 +1,4 @@ +output "arn" { + value = aws_config_organization_conformance_pack.default.arn + description = "ARN for the AWS Config Organization Conformance Pack" +} diff --git a/modules/aws-config/modules/org-conformance-pack/variables.tf b/modules/aws-config/modules/org-conformance-pack/variables.tf new file mode 100644 index 000000000..cb92dbf5c --- /dev/null +++ b/modules/aws-config/modules/org-conformance-pack/variables.tf @@ -0,0 +1,10 @@ +variable "conformance_pack" { + type = string + description = "The URL to a Conformance Pack" +} + +variable "parameter_overrides" { + type = map(any) + description = "A map of parameters names to values to override from the template" + default = {} +} diff --git a/modules/aws-config/modules/org-conformance-pack/versions.tf b/modules/aws-config/modules/org-conformance-pack/versions.tf new file mode 100644 index 000000000..cff384723 --- /dev/null +++ b/modules/aws-config/modules/org-conformance-pack/versions.tf @@ -0,0 +1,15 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + + http = { + source = 
"hashicorp/http" + version = ">= 2.1.0" + } + } +} diff --git a/modules/aws-config/outputs.tf b/modules/aws-config/outputs.tf new file mode 100644 index 000000000..a7a14c8ec --- /dev/null +++ b/modules/aws-config/outputs.tf @@ -0,0 +1,19 @@ +output "aws_config_configuration_recorder_id" { + value = module.aws_config.aws_config_configuration_recorder_id + description = "The ID of the AWS Config Recorder" +} + +output "aws_config_iam_role" { + description = "The ARN of the IAM Role used for AWS Config" + value = local.config_iam_role_arn +} + +output "storage_bucket_id" { + value = module.aws_config.storage_bucket_id + description = "Storage Config bucket ID" +} + +output "storage_bucket_arn" { + value = module.aws_config.storage_bucket_arn + description = "Storage Config bucket ARN" +} diff --git a/modules/aws-config/provider-awsutils.mixin.tf b/modules/aws-config/provider-awsutils.mixin.tf new file mode 100644 index 000000000..70fa8d095 --- /dev/null +++ b/modules/aws-config/provider-awsutils.mixin.tf @@ -0,0 +1,14 @@ +provider "awsutils" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} diff --git a/modules/aws-config/providers.tf b/modules/aws-config/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/aws-config/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/aws-config/remote-state.tf b/modules/aws-config/remote-state.tf new file mode 100644 index 000000000..8dad91122 --- /dev/null +++ b/modules/aws-config/remote-state.tf @@ -0,0 +1,49 @@ +module "account_map" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "account-map" + tenant = (var.account_map_tenant != "") ? var.account_map_tenant : module.this.tenant + stage = var.root_account_stage + environment = var.global_environment + privileged = var.privileged + + context = module.this.context +} + +module "config_bucket" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "config-bucket" + tenant = (var.config_bucket_tenant != "") ? var.config_bucket_tenant : module.this.tenant + stage = var.config_bucket_stage + environment = var.config_bucket_env + privileged = false + + context = module.this.context +} + +module "global_collector_region" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + count = !local.enabled || local.is_global_collector_region ? 
0 : 1 + + component = "aws-config-${lookup(module.utils.region_az_alt_code_maps["to_${var.az_abbreviation_type}"], var.global_resource_collector_region)}" + stage = module.this.stage + environment = lookup(module.utils.region_az_alt_code_maps["to_${var.az_abbreviation_type}"], var.global_resource_collector_region) + privileged = false + + context = module.this.context +} + +module "aws_team_roles" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "aws-team-roles" + environment = var.iam_roles_environment_name + + context = module.this.context +} diff --git a/modules/aws-config/variables.tf b/modules/aws-config/variables.tf new file mode 100644 index 000000000..367ddc360 --- /dev/null +++ b/modules/aws-config/variables.tf @@ -0,0 +1,173 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "account_map_tenant" { + type = string + default = "" + description = "(Optional) The tenant where the account_map component required by remote-state is deployed." +} + +variable "root_account_stage" { + type = string + default = "root" + description = "The stage name for the Organization root (master) account" +} + +variable "global_environment" { + type = string + default = "gbl" + description = "Global environment name" +} + +variable "privileged" { + type = bool + description = "True if the default provider already has access to the backend" + default = false +} + +variable "config_bucket_stage" { + type = string + description = "The stage of the AWS Config S3 Bucket" +} + +variable "config_bucket_env" { + type = string + description = "The environment of the AWS Config S3 Bucket" +} + +variable "config_bucket_tenant" { + type = string + default = "" + description = "(Optional) The tenant of the AWS Config S3 Bucket" +} + +variable "global_resource_collector_region" { + description = "The region that collects AWS Config data for global resources such as IAM" + type = string +} + +variable "central_resource_collector_account" { + description = "The name of the account that is the centralized aggregation account." + type = string +} + +variable "create_iam_role" { + description = "Flag to indicate whether an IAM Role should be created to grant the proper permissions for AWS Config" + type = bool + default = false +} + +variable "az_abbreviation_type" { + type = string + description = "AZ abbreviation type, `fixed` or `short`" + default = "fixed" +} + +variable "iam_role_arn" { + description = <<-DOC + The ARN for an IAM Role AWS Config uses to make read or write requests to the delivery channel and to describe the + AWS resources associated with the account. This is only used if create_iam_role is false. + + If you want to use an existing IAM Role, set the variable to the ARN of the existing role and set create_iam_role to `false`. + + See the AWS Docs for further information: + http://docs.aws.amazon.com/config/latest/developerguide/iamrole-permissions.html + DOC + default = null + type = string +} + +variable "conformance_packs" { + description = <<-DOC + List of conformance packs. Each conformance pack is a map with the following keys: name, conformance_pack, parameter_overrides. 
+ + For example: + conformance_packs = [ + { + name = "Operational-Best-Practices-for-CIS-AWS-v1.4-Level1" + conformance_pack = "https://raw.githubusercontent.com/awslabs/aws-config-rules/master/aws-config-conformance-packs/Operational-Best-Practices-for-CIS-AWS-v1.4-Level1.yaml" + parameter_overrides = { + "AccessKeysRotatedParamMaxAccessKeyAge" = "45" + } + }, + { + name = "Operational-Best-Practices-for-CIS-AWS-v1.4-Level2" + conformance_pack = "https://raw.githubusercontent.com/awslabs/aws-config-rules/master/aws-config-conformance-packs/Operational-Best-Practices-for-CIS-AWS-v1.4-Level2.yaml" + parameter_overrides = { + "IamPasswordPolicyParamMaxPasswordAge" = "45" + } + } + ] + + Complete list of AWS Conformance Packs managed by AWSLabs can be found here: + https://github.com/awslabs/aws-config-rules/tree/master/aws-config-conformance-packs + DOC + type = list(object({ + name = string + conformance_pack = string + parameter_overrides = map(string) + scope = optional(string, null) + })) + default = [] + validation { + # verify scope is valid + condition = alltrue([for conformance_pack in var.conformance_packs : conformance_pack.scope == null || conformance_pack.scope == "account" || conformance_pack.scope == "organization"]) + error_message = "The scope must be either `account` or `organization`." + } +} + +variable "delegated_accounts" { + description = "The account IDs of other accounts that will send their AWS Configuration or Security Hub data to this account" + type = set(string) + default = null +} + +variable "iam_roles_environment_name" { + type = string + description = "The name of the environment where the IAM roles are provisioned" + default = "gbl" +} + +variable "managed_rules" { + description = <<-DOC + A list of AWS Managed Rules that should be enabled on the account. + + See the following for a list of possible rules to enable: + https://docs.aws.amazon.com/config/latest/developerguide/managed-rules-by-aws-config.html + + Example: + ``` + managed_rules = { + access-keys-rotated = { + identifier = "ACCESS_KEYS_ROTATED" + description = "Checks whether the active access keys are rotated within the number of days specified in maxAccessKeyAge. The rule is NON_COMPLIANT if the access keys have not been rotated for more than maxAccessKeyAge number of days." + input_parameters = { + maxAccessKeyAge : "90" + } + enabled = true + tags = {} + } + } + ``` + DOC + type = map(object({ + description = string + identifier = string + input_parameters = any + tags = map(string) + enabled = bool + })) + default = {} +} + +variable "default_scope" { + type = string + description = "The default scope of the conformance pack. Valid values are `account` and `organization`." + default = "account" + validation { + condition = var.default_scope == "account" || var.default_scope == "organization" + error_message = "The scope must be either `account` or `organization`." 
+ } +} diff --git a/modules/aws-config/versions.tf b/modules/aws-config/versions.tf new file mode 100644 index 000000000..65cf14c13 --- /dev/null +++ b/modules/aws-config/versions.tf @@ -0,0 +1,15 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + + awsutils = { + source = "cloudposse/awsutils" + version = ">= 0.16.0" + } + } +} diff --git a/modules/aws-inspector/README.md b/modules/aws-inspector/README.md new file mode 100644 index 000000000..ec1bc6084 --- /dev/null +++ b/modules/aws-inspector/README.md @@ -0,0 +1,139 @@ +--- +tags: + - component/aws-inspector + - layer/security-and-compliance + - provider/aws +--- + +# Component: `aws-inspector` + +This component is responsible for provisioning an +[AWS Inspector](https://docs.aws.amazon.com/inspector/latest/user/what-is-inspector.html) by installing the +[Inspector agent](https://repost.aws/knowledge-center/set-up-amazon-inspector) across all EC2 instances and applying the +Inspector rules. + +AWS Inspector is a security assessment service offered by Amazon Web Services (AWS). It helps you analyze and evaluate +the security and compliance of your applications and infrastructure deployed on AWS. AWS Inspector automatically +assesses the resources within your AWS environment, such as Amazon EC2 instances, for potential security vulnerabilities +and deviations from security best practices. + +Here are some key features and functionalities of AWS Inspector: + +- **Security Assessments:** AWS Inspector performs security assessments by analyzing the behavior of your resources and + identifying potential security vulnerabilities. It examines the network configuration, operating system settings, and + installed software to detect common security issues. + +- **Vulnerability Detection:** AWS Inspector uses a predefined set of rules to identify common vulnerabilities, + misconfigurations, and security exposures. It leverages industry-standard security best practices and continuously + updates its knowledge base to stay current with emerging threats. + +- **Agent-Based Architecture:** AWS Inspector utilizes an agent-based approach, where you install an Inspector agent on + your EC2 instances. The agent collects data about the system and its configuration, securely sends it to AWS + Inspector, and allows for more accurate and detailed assessments. + +- **Security Findings:** After performing an assessment, AWS Inspector generates detailed findings that highlight + security vulnerabilities, including their severity level, impact, and remediation steps. These findings can help you + prioritize and address security issues within your AWS environment. + +- **Integration with AWS Services:** AWS Inspector seamlessly integrates with other AWS services, such as AWS + CloudFormation, AWS Systems Manager, and AWS Security Hub. This allows you to automate security assessments, manage + findings, and centralize security information across your AWS infrastructure. + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. + +```yaml +components: + terraform: + aws-inspector: + vars: + enabled: true + enabled_rules: + - cis +``` + +The `aws-inspector` component can be included in your Terraform stack configuration. In the provided example, it is +enabled with the `enabled` variable set to `true`. The `enabled_rules` variable specifies a list of rules to enable, and +in this case, it includes the `cis` rule. 
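+For example (an illustrative variation on the snippet above, not taken from the upstream docs), several of the
+documented short forms can be enabled at once:
+
+```yaml
+components:
+  terraform:
+    aws-inspector:
+      vars:
+        enabled: true
+        enabled_rules:
+          - cis
+          - cve
+          - sbp
+```
+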
To simplify rule selection, the short forms of the rules are used for the +`enabled_rules` key. These short forms automatically retrieve the appropriate ARN for the rule package based on the +region being used. You can find a list of available short forms and their corresponding rule packages in the +[var.enabled_rules](https://github.com/cloudposse/terraform-aws-inspector#input_enabled_rules) input documentation. + +For a comprehensive list of rules and their corresponding ARNs, you can refer to the +[Amazon Inspector ARNs for rules packages](https://docs.aws.amazon.com/inspector/latest/userguide/inspector_rules-arns.html) +documentation. This resource provides detailed information on various rules that can be used with AWS Inspector and +their unique identifiers (ARNs). + +By customizing the configuration with the appropriate rules, you can tailor the security assessments performed by AWS +Inspector to meet the specific requirements and compliance standards of your applications and infrastructure. + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.9.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [inspector](#module\_inspector) | cloudposse/inspector/aws | 0.2.8 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_ssm_association.install_agent](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_association) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [enabled\_rules](#input\_enabled\_rules) | A list of AWS Inspector rules that should run on a periodic basis.

Valid values are `cve`, `cis`, `nr`, `sbp` which map to the appropriate [Inspector rule arns by region](https://docs.aws.amazon.com/inspector/latest/userguide/inspector_rules-arns.html). | `list(string)` | `[]` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [inspector](#output\_inspector) | The AWS Inspector module outputs | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/TODO) - + Cloud Posse's upstream component + [](https://cpco.io/component) diff --git a/modules/aws-inspector/context.tf b/modules/aws-inspector/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/aws-inspector/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. 
+ Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. 
+ This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/aws-inspector/main.tf b/modules/aws-inspector/main.tf new file mode 100644 index 000000000..f046e75ea --- /dev/null +++ b/modules/aws-inspector/main.tf @@ -0,0 +1,30 @@ +locals { + enabled = module.this.enabled +} + +resource "aws_ssm_association" "install_agent" { + count = local.enabled ? 1 : 0 + + # Owned by AWS + # https://docs.aws.amazon.com/inspector/latest/userguide/inspector_installing-uninstalling-agents.html + name = "AmazonInspector-ManageAWSAgent" + + parameters = { + Operation = "Install" + } + + targets { + key = "InstanceIds" + values = ["*"] + } +} + +module "inspector" { + source = "cloudposse/inspector/aws" + version = "0.2.8" + + create_iam_role = true + enabled_rules = var.enabled_rules + + context = module.this.context +} diff --git a/modules/aws-inspector/outputs.tf b/modules/aws-inspector/outputs.tf new file mode 100644 index 000000000..55989c0e4 --- /dev/null +++ b/modules/aws-inspector/outputs.tf @@ -0,0 +1,4 @@ +output "inspector" { + description = "The AWS Inspector module outputs" + value = module.inspector +} diff --git a/modules/aws-inspector/providers.tf b/modules/aws-inspector/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/aws-inspector/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/aws-inspector/variables.tf b/modules/aws-inspector/variables.tf new file mode 100644 index 000000000..6f9616604 --- /dev/null +++ b/modules/aws-inspector/variables.tf @@ -0,0 +1,14 @@ +variable "region" { + type = string + description = "AWS region" +} + +variable "enabled_rules" { + type = list(string) + default = [] + description = <<-DOC + A list of AWS Inspector rules that should run on a periodic basis. + + Valid values are `cve`, `cis`, `nr`, `sbp` which map to the appropriate [Inspector rule arns by region](https://docs.aws.amazon.com/inspector/latest/userguide/inspector_rules-arns.html). + DOC +} diff --git a/modules/aws-inspector/versions.tf b/modules/aws-inspector/versions.tf new file mode 100644 index 000000000..cc73ffd35 --- /dev/null +++ b/modules/aws-inspector/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.9.0" + } + } +} diff --git a/modules/aws-inspector2/README.md b/modules/aws-inspector2/README.md new file mode 100644 index 000000000..0b9b0a581 --- /dev/null +++ b/modules/aws-inspector2/README.md @@ -0,0 +1,147 @@ +--- +tags: + - component/aws-inspector2 + - layer/security-and-compliance + - provider/aws +--- + +# Component: `aws-inspector2` + +This component is responsible for configuring Inspector V2 within an AWS Organization. + +## Usage + +**Stack Level**: Regional + +## Deployment Overview + +The deployment of this component requires multiple runs with different variable settings to properly configure the AWS +Organization. 
First, you delegate Inspector V2 central management to the Administrator account (usually `security` +account). After the Administrator account is delegated, we configure the it to manage Inspector V2 across all the +Organization accounts and send all their findings to that account. + +In the examples below, we assume that the AWS Organization Management account is `root` and the AWS Organization +Delegated Administrator account is `security`. + +### Deploy to Organization Management Account + +First, the component is deployed to the AWS Organization Management account `root` in each region in order to configure +the [AWS Delegated Administrator account](https://docs.aws.amazon.com/inspector/latest/user/designating-admin.html) that +operates Amazon Inspector V2. + +```yaml +# ue1-root +components: + terraform: + aws-inspector2/delegate-orgadmin/ue1: + metadata: + component: aws-inspector2 + vars: + enabled: true + region: us-east-1 +``` + +### Deploy Organization Settings in Delegated Administrator Account + +Now the component can be deployed to the Delegated Administrator Account `security` to create the organization-wide +configuration for all the Organization accounts. Note that `var.admin_delegated` set to `true` indicates that the +delegation has already been performed from the Organization Management account, and only the resources required for +organization-wide configuration will be created. + +```yaml +# ue1-security +components: + terraform: + aws-inspector2/orgadmin-configuration/ue1: + metadata: + component: aws-inspector2 + vars: + enabled: true + region: us-east-1 + admin_delegated: true +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 5.0 | +| [awsutils](#requirement\_awsutils) | >= 0.16.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 5.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.3 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_inspector2_delegated_admin_account.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/inspector2_delegated_admin_account) | resource | +| [aws_inspector2_enabler.delegated_admin](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/inspector2_enabler) | resource | +| [aws_inspector2_enabler.member_accounts](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/inspector2_enabler) | resource | +| [aws_inspector2_member_association.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/inspector2_member_association) | resource | +| [aws_inspector2_organization_configuration.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/inspector2_organization_configuration) | resource | +| [aws_caller_identity.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [account\_map\_tenant](#input\_account\_map\_tenant) | The tenant where the `account_map` component required by remote-state is deployed | `string` | 
`"core"` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [admin\_delegated](#input\_admin\_delegated) | A flag to indicate if the AWS Organization-wide settings should be created. This can only be done after the Inspector V2
Administrator account has already been delegated from the AWS Org Management account (usually 'root'). See the
Deployment section of the README for more information. | `bool` | `false` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [auto\_enable\_ec2](#input\_auto\_enable\_ec2) | Whether Amazon EC2 scans are automatically enabled for new members of the Amazon Inspector organization. | `bool` | `true` | no | +| [auto\_enable\_ecr](#input\_auto\_enable\_ecr) | Whether Amazon ECR scans are automatically enabled for new members of the Amazon Inspector organization. | `bool` | `true` | no | +| [auto\_enable\_lambda](#input\_auto\_enable\_lambda) | Whether Lambda Function scans are automatically enabled for new members of the Amazon Inspector organization. | `bool` | `true` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delegated\_administrator\_account\_name](#input\_delegated\_administrator\_account\_name) | The name of the account that is the AWS Organization Delegated Administrator account | `string` | `"security"` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [global\_environment](#input\_global\_environment) | Global environment name | `string` | `"gbl"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [member\_association\_excludes](#input\_member\_association\_excludes) | List of account names to exclude from Amazon Inspector member association | `list(string)` | `[]` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [organization\_management\_account\_name](#input\_organization\_management\_account\_name) | The name of the AWS Organization management account | `string` | `null` | no | +| [privileged](#input\_privileged) | true if the default provider already has access to the backend | `bool` | `false` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [root\_account\_stage](#input\_root\_account\_stage) | The stage name for the Organization root (management) account. This is used to lookup account IDs from account names
using the `account-map` component. | `string` | `"root"` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [aws\_inspector2\_member\_association](#output\_aws\_inspector2\_member\_association) | The Inspector2 member association resource. | + + + +## References + +- [Amazon Inspector V2 Documentation](https://docs.aws.amazon.com/inspector/latest/user/what-is-inspector.html) +- [Cloud Posse's upstream component](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/guardduty/common/) + +[](https://cpco.io/component) diff --git a/modules/aws-inspector2/context.tf b/modules/aws-inspector2/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/aws-inspector2/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/aws-inspector2/main.tf b/modules/aws-inspector2/main.tf new file mode 100644 index 000000000..1c6b9ff33 --- /dev/null +++ b/modules/aws-inspector2/main.tf @@ -0,0 +1,63 @@ +locals { + enabled = module.this.enabled + account_map = module.account_map.outputs.full_account_map + + current_account_id = one(data.aws_caller_identity.this[*].account_id) + member_account_ids = [for a in keys(local.account_map) : (local.account_map[a]) if(local.account_map[a] != local.current_account_id) && !contains(var.member_association_excludes, local.account_map[a])] + + org_delegated_administrator_account_id = local.account_map[var.delegated_administrator_account_name] + org_management_account_id = var.organization_management_account_name == null ? local.account_map[module.account_map.outputs.root_account_account_name] : local.account_map[var.organization_management_account_name] + + is_org_delegated_administrator_account = local.current_account_id == local.org_delegated_administrator_account_id + is_org_management_account = local.current_account_id == local.org_management_account_id + + create_org_delegation = local.enabled && local.is_org_management_account + create_org_configuration = local.enabled && local.is_org_delegated_administrator_account && var.admin_delegated + + resource_types = compact([var.auto_enable_ec2 ? "EC2" : null, var.auto_enable_ecr ? "ECR" : null, var.auto_enable_lambda ? "Lambda" : null]) +} + +data "aws_caller_identity" "this" { + count = local.enabled ? 1 : 0 +} + +# If we are in the AWS Organization management account, delegate Inspector2 to +# the administrator account (usually the security account). +resource "aws_inspector2_delegated_admin_account" "default" { + count = local.create_org_delegation ? 1 : 0 + account_id = local.org_delegated_administrator_account_id +} + +resource "aws_inspector2_enabler" "delegated_admin" { + count = local.create_org_configuration ? 
1 : 0 + + account_ids = [local.org_delegated_administrator_account_id] + resource_types = local.resource_types +} + +# If we are are in the AWS Organization designated administrator account, +# configure all other accounts to send their Inspector2 findings. +resource "aws_inspector2_organization_configuration" "default" { + count = local.create_org_configuration ? 1 : 0 + + depends_on = [aws_inspector2_enabler.delegated_admin] + auto_enable { + ec2 = var.auto_enable_ec2 + ecr = var.auto_enable_ecr + lambda = var.auto_enable_lambda + } +} + +resource "aws_inspector2_enabler" "member_accounts" { + count = local.create_org_configuration ? 1 : 0 + + depends_on = [aws_inspector2_member_association.default] + + account_ids = local.member_account_ids + resource_types = local.resource_types +} + +resource "aws_inspector2_member_association" "default" { + for_each = local.create_org_configuration ? toset(local.member_account_ids) : [] + account_id = each.value +} diff --git a/modules/aws-inspector2/outputs.tf b/modules/aws-inspector2/outputs.tf new file mode 100644 index 000000000..7c3779cfc --- /dev/null +++ b/modules/aws-inspector2/outputs.tf @@ -0,0 +1,4 @@ +output "aws_inspector2_member_association" { + value = aws_inspector2_member_association.default + description = "The Inspector2 member association resource." +} diff --git a/modules/aws-inspector2/providers.tf b/modules/aws-inspector2/providers.tf new file mode 100644 index 000000000..582f2f95c --- /dev/null +++ b/modules/aws-inspector2/providers.tf @@ -0,0 +1,18 @@ +provider "aws" { + region = var.region + + profile = !var.privileged && module.iam_roles.profiles_enabled ? module.iam_roles.terraform_profile_name : null + dynamic "assume_role" { + for_each = var.privileged || module.iam_roles.profiles_enabled || (module.iam_roles.terraform_role_arn == null) ? [] : ["role"] + content { + role_arn = module.iam_roles.terraform_role_arn + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + privileged = var.privileged + + context = module.this.context +} diff --git a/modules/aws-inspector2/remote-state.tf b/modules/aws-inspector2/remote-state.tf new file mode 100644 index 000000000..da115834a --- /dev/null +++ b/modules/aws-inspector2/remote-state.tf @@ -0,0 +1,12 @@ +module "account_map" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.4.3" + + component = "account-map" + tenant = var.account_map_tenant != "" ? var.account_map_tenant : module.this.tenant + stage = var.root_account_stage + environment = var.global_environment + privileged = var.privileged + + context = module.this.context +} diff --git a/modules/aws-inspector2/variables.tf b/modules/aws-inspector2/variables.tf new file mode 100644 index 000000000..1880ef921 --- /dev/null +++ b/modules/aws-inspector2/variables.tf @@ -0,0 +1,77 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "auto_enable_ec2" { + description = "Whether Amazon EC2 scans are automatically enabled for new members of the Amazon Inspector organization." + type = bool + default = true +} + +variable "auto_enable_ecr" { + description = "Whether Amazon ECR scans are automatically enabled for new members of the Amazon Inspector organization." + type = bool + default = true +} + +variable "auto_enable_lambda" { + description = "Whether Lambda Function scans are automatically enabled for new members of the Amazon Inspector organization." 
+ type = bool + default = true +} + +variable "account_map_tenant" { + type = string + default = "core" + description = "The tenant where the `account_map` component required by remote-state is deployed" +} + +variable "root_account_stage" { + type = string + default = "root" + description = <<-DOC + The stage name for the Organization root (management) account. This is used to lookup account IDs from account names + using the `account-map` component. + DOC +} + +variable "global_environment" { + type = string + default = "gbl" + description = "Global environment name" +} + +variable "privileged" { + type = bool + default = false + description = "true if the default provider already has access to the backend" +} + +variable "organization_management_account_name" { + type = string + default = null + description = "The name of the AWS Organization management account" +} + +variable "member_association_excludes" { + description = "List of account names to exlude from Amazon Inspector member association" + type = list(string) + default = [] +} + +variable "delegated_administrator_account_name" { + type = string + default = "security" + description = "The name of the account that is the AWS Organization Delegated Administrator account" +} + +variable "admin_delegated" { + type = bool + default = false + description = < ## Requirements @@ -85,10 +96,11 @@ components: | [saml\_provider\_arns](#output\_saml\_provider\_arns) | Map of SAML provider names to provider ARNs | | [saml\_provider\_assume\_role\_policy](#output\_saml\_provider\_assume\_role\_policy) | JSON "assume role" policy document to use for roles allowed to log in via SAML | - + ## References - * [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/sso) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/sso) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/aws-saml/main.tf b/modules/aws-saml/main.tf index 361e80d60..959ffefe1 100644 --- a/modules/aws-saml/main.tf +++ b/modules/aws-saml/main.tf @@ -25,6 +25,7 @@ data "aws_iam_policy_document" "saml_provider_assume" { sid = "SamlProviderAssume" actions = [ "sts:AssumeRoleWithSAML", + "sts:SetSourceIdentity", "sts:TagSession", ] @@ -35,10 +36,20 @@ data "aws_iam_policy_document" "saml_provider_assume" { } condition { - test = "StringEquals" + # Use StringLike rather than StringEquals to avoid having to list every region's endpoint + test = "StringLike" variable = "SAML:aud" - values = ["https://signin.aws.amazon.com/saml"] + # Allow sign in from any valid AWS SAML endpoint + # See https://docs.aws.amazon.com/general/latest/gr/signin-service.html + # and https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#condition-keys-saml + # Note: The value for this key comes from the SAML Recipient field in the assertion, not the Audience field, + # and is thus not the actual SAML:aud in the SAML assertion. 
+ values = [ + "https://signin.aws.amazon.com/saml", + "https://*.signin.aws.amazon.com/saml", + "https://signin.amazonaws-us-gov.com/saml", + "https://us-gov-east-1.signin.amazonaws-us-gov.com/saml", + ] } } } - diff --git a/modules/aws-saml/modules/okta-user/main.tf b/modules/aws-saml/modules/okta-user/main.tf index 224b35fe7..e309cfe18 100644 --- a/modules/aws-saml/modules/okta-user/main.tf +++ b/modules/aws-saml/modules/okta-user/main.tf @@ -1,4 +1,10 @@ +locals { + enabled = module.this.enabled +} + resource "aws_iam_user" "default" { + count = local.enabled ? 1 : 0 + name = module.this.id tags = module.this.tags force_destroy = true @@ -24,13 +30,13 @@ resource "aws_iam_policy" "default" { } resource "aws_iam_user_policy_attachment" "default" { - user = aws_iam_user.default.name + user = one(aws_iam_user.default[*].name) policy_arn = aws_iam_policy.default.arn } # Generate API credentials resource "aws_iam_access_key" "default" { - user = aws_iam_user.default.name + user = one(aws_iam_user.default[*].name) } resource "aws_ssm_parameter" "okta_user_access_key_id" { diff --git a/modules/aws-saml/modules/okta-user/outputs.tf b/modules/aws-saml/modules/okta-user/outputs.tf index 8caa1f211..9472405e1 100644 --- a/modules/aws-saml/modules/okta-user/outputs.tf +++ b/modules/aws-saml/modules/okta-user/outputs.tf @@ -1,14 +1,14 @@ output "user_name" { - value = aws_iam_user.default.name + value = one(aws_iam_user.default[*].name) description = "User name" } output "user_arn" { - value = aws_iam_user.default.arn + value = one(aws_iam_user.default[*].arn) description = "User ARN" } output "ssm_prefix" { - value = "AWS Key for ${aws_iam_user.default.name} is in Systems Manager Parameter Store under ${aws_ssm_parameter.okta_user_access_key_id.name} and ${aws_ssm_parameter.okta_user_secret_access_key.name}" + value = "AWS Key for ${one(aws_iam_user.default[*].name)} is in Systems Manager Parameter Store under ${aws_ssm_parameter.okta_user_access_key_id.name} and ${aws_ssm_parameter.okta_user_secret_access_key.name}" description = "Where to find the AWS API key information for the user" } diff --git a/modules/aws-saml/outputs.tf b/modules/aws-saml/outputs.tf index b98deba72..89ed29cdf 100644 --- a/modules/aws-saml/outputs.tf +++ b/modules/aws-saml/outputs.tf @@ -15,4 +15,3 @@ output "saml_provider_assume_role_policy" { value = one(data.aws_iam_policy_document.saml_provider_assume[*].json) description = "JSON \"assume role\" policy document to use for roles allowed to log in via SAML" } - diff --git a/modules/aws-shield/README.md b/modules/aws-shield/README.md new file mode 100644 index 000000000..e3114ef40 --- /dev/null +++ b/modules/aws-shield/README.md @@ -0,0 +1,178 @@ +--- +tags: + - component/aws-shield + - layer/security-and-compliance + - provider/aws +--- + +# Component: `aws-shield` + +This component is responsible for enabling AWS Shield Advanced Protection for the following resources: + +- Application Load Balancers (ALBs) +- CloudFront Distributions +- Elastic IPs +- Route53 Hosted Zones + +This component assumes that resources it is configured to protect are not already protected by other components that +have their `xxx_aws_shield_protection_enabled` variable set to `true`. + +This component also requires that the account where the component is being provisioned to has been +[subscribed to AWS Shield Advanced](https://docs.aws.amazon.com/waf/latest/developerguide/enable-ddos-prem.html). 
+ +## Usage + +**Stack Level**: Global or Regional + +The following snippet shows how to use all of this component's features in a stack configuration: + +```yaml +components: + terraform: + aws-shield: + settings: + spacelift: + workspace_enabled: true + vars: + enabled: true + route53_zone_names: + - test.ue1.example.net + alb_names: + - k8s-common-2c5f23ff99 + cloudfront_distribution_ids: + - EDFDVBD632BHDS5 + eips: + - 3.214.128.240 + - 35.172.208.150 + - 35.171.70.50 +``` + +A typical global configuration will only include the `route53_zone_names` and `cloudfront_distribution_ids` variables, +as global Route53 Hosted Zones may exist in that account, and because CloudFront is a global AWS service. + +A global stack configuration will not have a VPC, and hence `alb_names` and `eips` should not be defined: + +```yaml +components: + terraform: + aws-shield: + settings: + spacelift: + workspace_enabled: true + vars: + enabled: true + route53_zone_names: + - test.example.net + cloudfront_distribution_ids: + - EDFDVBD632BHDS5 +``` + +Regional stack configurations will typically make use of all resources except for `cloudfront_distribution_ids`: + +```yaml +components: + terraform: + aws-shield: + settings: + spacelift: + workspace_enabled: true + vars: + route53_zone_names: + - test.ue1.example.net + alb_names: + - k8s-common-2c5f23ff99 + eips: + - 3.214.128.240 + - 35.172.208.150 + - 35.171.70.50 +``` + +Stack configurations which rely on components with a `xxx_aws_shield_protection_enabled` variable should set that +variable to `true` and leave the corresponding variable for this component as empty, relying on that component's AWS +Shield Advanced functionality instead. This leads to more simplified inter-component dependencies and minimizes the need +for maintaining the provisioning order during a cold-start. 
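+For example, a stack might delegate CloudFront protection to the component that manages the distribution and reserve
+this component for the remaining resources. The component and variable names below are hypothetical placeholders used
+only for illustration; substitute whichever component in your stack exposes an `xxx_aws_shield_protection_enabled`
+variable:
+
+```yaml
+components:
+  terraform:
+    # Hypothetical component that manages its own CloudFront distribution and
+    # enables AWS Shield Advanced protection for it directly.
+    example-spa:
+      vars:
+        cloudfront_aws_shield_protection_enabled: true
+
+    aws-shield:
+      vars:
+        enabled: true
+        # CloudFront protection is delegated to the component above, so
+        # `cloudfront_distribution_ids` is intentionally left empty here.
+        route53_zone_names:
+          - test.example.net
+```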
+ + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [alb](#module\_alb) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_shield_protection.alb_shield_protection](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/shield_protection) | resource | +| [aws_shield_protection.cloudfront_shield_protection](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/shield_protection) | resource | +| [aws_shield_protection.eip_shield_protection](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/shield_protection) | resource | +| [aws_shield_protection.route53_zone_protection](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/shield_protection) | resource | +| [aws_alb.alb](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/alb) | data source | +| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | +| [aws_cloudfront_distribution.cloudfront_distribution](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/cloudfront_distribution) | data source | +| [aws_eip.eip](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eip) | data source | +| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | +| [aws_route53_zone.route53_zone](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/route53_zone) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [alb\_names](#input\_alb\_names) | list of ALB names which will be protected with AWS Shield Advanced | `list(string)` | `[]` | no | +| [alb\_protection\_enabled](#input\_alb\_protection\_enabled) | Enable ALB protection. By default, ALB names are read from the EKS cluster ALB control group | `bool` | `false` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [cloudfront\_distribution\_ids](#input\_cloudfront\_distribution\_ids) | list of CloudFront Distribution IDs which will be protected with AWS Shield Advanced | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [eips](#input\_eips) | List of Elastic IPs which will be protected with AWS Shield Advanced | `list(string)` | `[]` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [route53\_zone\_names](#input\_route53\_zone\_names) | List of Route53 Hosted Zone names which will be protected with AWS Shield Advanced | `list(string)` | `[]` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [application\_load\_balancer\_protections](#output\_application\_load\_balancer\_protections) | AWS Shield Advanced Protections for ALBs | +| [cloudfront\_distribution\_protections](#output\_cloudfront\_distribution\_protections) | AWS Shield Advanced Protections for CloudFront Distributions | +| [elastic\_ip\_protections](#output\_elastic\_ip\_protections) | AWS Shield Advanced Protections for Elastic IPs | +| [route53\_hosted\_zone\_protections](#output\_route53\_hosted\_zone\_protections) | AWS Shield Advanced Protections for Route53 Hosted Zones | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/aws-shield) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/aws-shield/alb.tf b/modules/aws-shield/alb.tf new file mode 100644 index 000000000..5a7c70aae --- /dev/null +++ b/modules/aws-shield/alb.tf @@ -0,0 +1,14 @@ +data "aws_alb" "alb" { + for_each = local.alb_protection_enabled == false ? toset([]) : length(var.alb_names) > 0 ? toset(var.alb_names) : toset([module.alb[0].outputs.load_balancer_name]) + + name = each.key +} + +resource "aws_shield_protection" "alb_shield_protection" { + for_each = local.alb_protection_enabled ? data.aws_alb.alb : {} + + name = data.aws_alb.alb[each.key].name + resource_arn = data.aws_alb.alb[each.key].arn + + tags = local.tags +} diff --git a/modules/aws-shield/cloudfront.tf b/modules/aws-shield/cloudfront.tf new file mode 100644 index 000000000..85ae8aebd --- /dev/null +++ b/modules/aws-shield/cloudfront.tf @@ -0,0 +1,14 @@ +data "aws_cloudfront_distribution" "cloudfront_distribution" { + for_each = local.cloudfront_distribution_protection_enabled ? toset(var.cloudfront_distribution_ids) : [] + + id = each.key +} + +resource "aws_shield_protection" "cloudfront_shield_protection" { + for_each = local.cloudfront_distribution_protection_enabled ? data.aws_cloudfront_distribution.cloudfront_distribution : {} + + name = data.aws_cloudfront_distribution.cloudfront_distribution[each.key].domain_name + resource_arn = data.aws_cloudfront_distribution.cloudfront_distribution[each.key].arn + + tags = local.tags +} diff --git a/modules/aws-shield/context.tf b/modules/aws-shield/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/aws-shield/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. 
+# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 
'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. 
+ Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/aws-shield/eip.tf b/modules/aws-shield/eip.tf new file mode 100644 index 000000000..e4c3afa1f --- /dev/null +++ b/modules/aws-shield/eip.tf @@ -0,0 +1,14 @@ +data "aws_eip" "eip" { + for_each = local.eip_protection_enabled ? toset(var.eips) : [] + + public_ip = each.key +} + +resource "aws_shield_protection" "eip_shield_protection" { + for_each = local.eip_protection_enabled ? data.aws_eip.eip : {} + + name = data.aws_eip.eip[each.key].id + resource_arn = "arn:${local.partition}:ec2:${var.region}:${local.account_id}:eip-allocation/${data.aws_eip.eip[each.key].id}" + + tags = local.tags +} diff --git a/modules/aws-shield/main.tf b/modules/aws-shield/main.tf new file mode 100644 index 000000000..764f4995a --- /dev/null +++ b/modules/aws-shield/main.tf @@ -0,0 +1,22 @@ +locals { + enabled = module.this.enabled + tags = module.this.tags + + account_id = one(data.aws_caller_identity.current[*].account_id) + + # Used to determine correct partition (i.e. - `aws`, `aws-gov`, `aws-cn`, etc.) + partition = one(data.aws_partition.current[*].partition) + + alb_protection_enabled = local.enabled && var.alb_protection_enabled + cloudfront_distribution_protection_enabled = local.enabled && length(var.cloudfront_distribution_ids) > 0 + eip_protection_enabled = local.enabled && length(var.eips) > 0 + route53_protection_enabled = local.enabled && length(var.route53_zone_names) > 0 +} + +data "aws_caller_identity" "current" { + count = local.enabled ? 1 : 0 +} + +data "aws_partition" "current" { + count = local.enabled ? 
1 : 0 +} diff --git a/modules/aws-shield/outputs.tf b/modules/aws-shield/outputs.tf new file mode 100644 index 000000000..56dede749 --- /dev/null +++ b/modules/aws-shield/outputs.tf @@ -0,0 +1,19 @@ +output "application_load_balancer_protections" { + description = "AWS Shield Advanced Protections for ALBs" + value = aws_shield_protection.alb_shield_protection +} + +output "cloudfront_distribution_protections" { + description = "AWS Shield Advanced Protections for CloudFront Distributions" + value = aws_shield_protection.cloudfront_shield_protection +} + +output "elastic_ip_protections" { + description = "AWS Shield Advanced Protections for Elastic IPs" + value = aws_shield_protection.eip_shield_protection +} + +output "route53_hosted_zone_protections" { + description = "AWS Shield Advanced Protections for Route53 Hosted Zones" + value = aws_shield_protection.route53_zone_protection +} diff --git a/modules/aws-shield/providers.tf b/modules/aws-shield/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/aws-shield/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/aws-shield/remote-state.tf b/modules/aws-shield/remote-state.tf new file mode 100644 index 000000000..109fa6c7b --- /dev/null +++ b/modules/aws-shield/remote-state.tf @@ -0,0 +1,9 @@ +module "alb" { + count = local.alb_protection_enabled == false ? 0 : length(var.alb_names) > 0 ? 0 : 1 + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "eks/alb-controller-ingress-group" + + context = module.this.context +} diff --git a/modules/aws-shield/route53.tf b/modules/aws-shield/route53.tf new file mode 100644 index 000000000..c73555dd0 --- /dev/null +++ b/modules/aws-shield/route53.tf @@ -0,0 +1,14 @@ +data "aws_route53_zone" "route53_zone" { + for_each = local.route53_protection_enabled ? toset(var.route53_zone_names) : [] + + name = each.key +} + +resource "aws_shield_protection" "route53_zone_protection" { + for_each = local.route53_protection_enabled ? data.aws_route53_zone.route53_zone : {} + + name = data.aws_route53_zone.route53_zone[each.key].name + resource_arn = "arn:${local.partition}:route53:::hostedzone/${data.aws_route53_zone.route53_zone[each.key].id}" + + tags = local.tags +} diff --git a/modules/aws-shield/variables.tf b/modules/aws-shield/variables.tf new file mode 100644 index 000000000..882e3fb86 --- /dev/null +++ b/modules/aws-shield/variables.tf @@ -0,0 +1,34 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "alb_names" { + description = "list of ALB names which will be protected with AWS Shield Advanced" + type = list(string) + default = [] +} + +variable "alb_protection_enabled" { + description = "Enable ALB protection. 
By default, ALB names are read from the EKS cluster ALB control group" + type = bool + default = false +} + +variable "cloudfront_distribution_ids" { + description = "list of CloudFront Distribution IDs which will be protected with AWS Shield Advanced" + type = list(string) + default = [] +} + +variable "eips" { + description = "List of Elastic IPs which will be protected with AWS Shield Advanced" + type = list(string) + default = [] +} + +variable "route53_zone_names" { + description = "List of Route53 Hosted Zone names which will be protected with AWS Shield Advanced" + type = list(string) + default = [] +} diff --git a/modules/aws-shield/versions.tf b/modules/aws-shield/versions.tf new file mode 100644 index 000000000..f33ede77f --- /dev/null +++ b/modules/aws-shield/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + } +} diff --git a/modules/aws-sso/CHANGELOG.md b/modules/aws-sso/CHANGELOG.md new file mode 100644 index 000000000..ff01dbd53 --- /dev/null +++ b/modules/aws-sso/CHANGELOG.md @@ -0,0 +1,47 @@ +# Change log for aws-sso component + +**_NOTE_**: This file is manually generated and is a work-in-progress. + +### PR 830 + +- Fix `providers.tf` to properly assign roles for `root` account when deploying to `identity` account. +- Restore the `sts:SetSourceIdentity` permission for Identity-role-TeamAccess permission sets added in PR 738 and + inadvertently removed in PR 740. +- Update comments and documentation to reflect Cloud Posse's current recommendation that SSO **_not_** be delegated to + the `identity` account. + +### Version 1.240.1, PR 740 + +This PR restores compatibility with `account-map` prior to version 1.227.0 and fixes bugs that made versions 1.227.0 up +to this release unusable. + +Access control configuration (`aws-teams`, `iam-primary-roles`, `aws-sso`, etc.) has undergone several transformations +over the evolution of Cloud Posse's reference architecture. This update resolves a number of compatibility issues with +some of them. + +If the roles you are using to deploy this component are allowed to assume the `tfstate-backend` access roles (typically +`...-gbl-root-tfstate`, possibly `...-gbl-root-tfstate-ro` or `...-gbl-root-terraform`), then you can use the defaults. +This configuration was introduced in `terraform-aws-components` v1.227.0 and is the default for all new deployments. + +If the roles you are using to deploy this component are not allowed to assume the `tfstate-backend` access roles, then +you will need to configure this component to include the following: + +```yaml +components: + terraform: + aws-sso: + backend: + s3: + role_arn: null + vars: + privileged: true +``` + +If you are deploying this component to the `identity` account, then this restriction will require you to deploy it via +the SuperAdmin user. If you are deploying this component to the `root` account, then any user or role in the `root` +account with the `AdministratorAccess` policy attached will be able to deploy this component. + +## v1.227.0 + +This component was broken by changes made in v1.227.0. Either use a version before v1.227.0 or use the version released +by PR 740 or later. 
diff --git a/modules/aws-sso/README.md b/modules/aws-sso/README.md index 7b6a4f7c3..a11ef71da 100644 --- a/modules/aws-sso/README.md +++ b/modules/aws-sso/README.md @@ -1,8 +1,18 @@ +--- +tags: + - component/aws-sso + - layer/identity + - provider/aws + - privileged +--- + # Component: `aws-sso` -This component is responsible for creating [AWS SSO Permission Sets][1] and creating AWS SSO Account Assignments, that is, assigning IdP (Okta) groups and/or users to AWS SSO permission sets in specific AWS Accounts. +This component is responsible for creating [AWS SSO Permission Sets][1] and creating AWS SSO Account Assignments, that +is, assigning IdP (Okta) groups and/or users to AWS SSO permission sets in specific AWS Accounts. -This component assumes that AWS SSO has already been enabled via the AWS Console (there isn't terraform or AWS CLI support for this currently) and that the IdP has been configured to sync users and groups to AWS SSO. +This component assumes that AWS SSO has already been enabled via the AWS Console (there isn't terraform or AWS CLI +support for this currently) and that the IdP has been configured to sync users and groups to AWS SSO. ## Usage @@ -12,20 +22,57 @@ This component assumes that AWS SSO has already been enabled via the AWS Console 1. Select primary region 1. Go to AWS SSO 1. Enable AWS SSO + +#### Delegation no longer recommended + +Previously, Cloud Posse recommended delegating SSO to the identity account by following the next 2 steps: + 1. Click Settings > Management -1. Delegate Identity as an administrator +1. Delegate Identity as an administrator. This can take up to 30 minutes to take effect. + +However, this is no longer recommended. Because the delegated SSO administrator cannot make changes in the `root` +account and this component needs to be able to make changes in the `root` account, any purported security advantage +achieved by delegating SSO to the `identity` account is lost. -Once identity is delegated, it will take up to 20 to 30 minutes for the identity account to understand its delegation. +Nevertheless, it is also not worth the effort to remove the delegation. If you have already delegated SSO to the +`identity`, continue on, leaving the stack configuration in the `gbl-identity` stack rather than the currently +recommended `gbl-root` stack. + +### Google Workspace + +> [!IMPORTANT] +> +> > Your identity source is currently configured as 'External identity provider'. To add new groups or edit their +> > memberships, you must do this using your external identity provider. +> +> Groups _cannot_ be created with ClickOps in the AWS console and instead must be created with AWS API. + +Google Workspace is now supported by AWS Identity Center, but Group creation is not automatically handled. After +[configuring SAML and SCIM with Google Workspace and IAM Identity Center following the AWS documentation](https://docs.aws.amazon.com/singlesignon/latest/userguide/gs-gwp.html), +add any Group name to `var.groups` to create the Group with Terraform. Once the setup steps as described in the AWS +documentation have been completed and the Groups are created with Terraform, Users should automatically populate each +created Group. + +```yaml +components: + terraform: + aws-sso: + vars: + groups: + - "Developers" + - "Dev Ops" +``` ### Atmos -**Stack Level**: Global -**Deployment**: Must be deployed by root-admin using `atmos` CLI +**Stack Level**: Global **Deployment**: Must be deployed by root-admin using `atmos` CLI -Add catalog to `gbl-identity` root stack. 
+Add catalog to `gbl-root` root stack. #### `account_assignments` -The `account_assignments` setting configures access to permission sets for users and groups in accounts, in the following structure: + +The `account_assignments` setting configures access to permission sets for users and groups in accounts, in the +following structure: ```yaml : @@ -40,18 +87,75 @@ The `account_assignments` setting configures access to permission sets for users ``` - The account names (a.k.a. "stages") must already be configured via the `accounts` component. -- The user and group names must already exist in AWS SSO. Usually this is accomplished by configuring them in Okta and syncing Okta with AWS SSO. -- The permission sets are defined (by convention) in files names `policy-.tf` in the `aws-sso` component. The definition includes the name of the permission set. See `components/terraform/aws-sso/policy-AdminstratorAccess.tf` for an example. +- The user and group names must already exist in AWS SSO. Usually this is accomplished by configuring them in Okta and + syncing Okta with AWS SSO. +- The permission sets are defined (by convention) in files names `policy-.tf` in the `aws-sso` + component. The definition includes the name of the permission set. See + `components/terraform/aws-sso/policy-AdminstratorAccess.tf` for an example. #### `identity_roles_accessible` -The `identity_roles_accessible` element provides a list of role names corresponding to roles created in the `iam-primary-roles` component. For each names role, a corresponding permission set will be created which allows the user to assume that role. The permission set name is generated in Terraform from the role name using this statement: + +The `identity_roles_accessible` element provides a list of role names corresponding to roles created in the +`iam-primary-roles` component. For each named role, a corresponding permission set will be created which allows the user +to assume that role. The permission set name is generated in Terraform from the role name using this statement: ``` -format("Identity%sRoleAccess", title(role)) +format("Identity%sTeamAccess", replace(title(role), "-", "")) ``` +### Defining a new permission set + +1. Give the permission set a name, capitalized, in CamelCase, e.g. `AuditManager`. We will use `NAME` as a placeholder + for the name in the instructions below. In Terraform, convert the name to lowercase snake case, e.g. `audit_manager`. +2. Create a file in the `aws-sso` directory with the name `policy-NAME.tf`. +3. In that file, create a policy as follows: + + ```hcl + data "aws_iam_policy_document" "TerraformUpdateAccess" { + # Define the custom policy here + } + + locals { + NAME_permission_set = { # e.g. audit_manager_permission_set + name = "NAME", # e.g. AuditManager + description = "", + relay_state = "", + session_duration = "PT1H", # One hour, maximum allowed for chained assumed roles + tags = {}, + inline_policy = data.aws_iam_policy_document.NAME.json, + policy_attachments = [] # ARNs of AWS managed IAM policies to attach, e.g. arn:aws:iam::aws:policy/ReadOnlyAccess + customer_managed_policy_attachments = [] # ARNs of customer managed IAM policies to attach + } + } + ``` + +4. Create a file named `additional-permission-sets-list_override.tf` in the `aws-sso` directory (if it does not already + exist). 
This is a [terraform override file](https://developer.hashicorp.com/terraform/language/files/override), + meaning its contents will be merged with the main terraform file, and any locals defined in it will override locals + defined in other files. Having your code in this separate override file makes it possible for the component to + provide a placeholder local variable so that it works without customization, while allowing you to customize the + component and still update it without losing your customizations. +5. In that file, redefine the local variable `overridable_additional_permission_sets` as follows: + + ```hcl + locals { + overridable_additional_permission_sets = [ + local.NAME_permission_set, + ] + } + ``` + + If you have multiple custom policies, add each one to the list. + +6. With that done, the new permission set will be created when the changes are applied. You can then use it just like + the others. +7. If you want the permission set to be able to use Terraform, enable access to the Terraform state read/write (default) + role in `tfstate-backend`. + #### Example -The example snippet below shows how to use this module with various combinations (plain YAML, YAML Anchors and a combination of the two): + +The example snippet below shows how to use this module with various combinations (plain YAML, YAML Anchors and a +combination of the two): ```yaml prod-cloud-engineers: &prod-cloud-engineers @@ -75,7 +179,7 @@ components: groups: *prod-cloud-engineers prod: groups: - Admininstrators: + Administrators: permission_sets: - AdministratorAccess - ReadOnlyAccess @@ -84,7 +188,7 @@ components: - ReadOnlyAccess dev: groups: - Admininstrators: + Administrators: permission_sets: - AdministratorAccess - ReadOnlyAccess @@ -92,50 +196,53 @@ components: permission_sets: - AdministratorAccess - ReadOnlyAccess - identity_roles_accessible: - - "admin" - - "ops" - - "poweruser" - - "observer" - - "reader" - - "support" - - "viewer" - + aws_teams_accessible: + - "developers" + - "devops" + - "managers" + - "support" ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | +| [aws](#provider\_aws) | >= 4.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.3 | +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [permission\_sets](#module\_permission\_sets) | cloudposse/sso/aws//modules/permission-sets | 0.6.2 | -| [role\_prefix](#module\_role\_prefix) | cloudposse/label/null | 0.25.0 | -| [sso\_account\_assignments](#module\_sso\_account\_assignments) | cloudposse/sso/aws//modules/account-assignments | 0.6.2 | -| [sso\_account\_assignments\_root](#module\_sso\_account\_assignments\_root) | cloudposse/sso/aws//modules/account-assignments | 0.6.2 | +| [iam\_roles\_root](#module\_iam\_roles\_root) | ../account-map/modules/iam-roles | n/a | +| [permission\_sets](#module\_permission\_sets) | cloudposse/sso/aws//modules/permission-sets | 1.1.1 | +| [role\_map](#module\_role\_map) | ../account-map/modules/roles-to-principals | n/a | +| [sso\_account\_assignments](#module\_sso\_account\_assignments) | 
cloudposse/sso/aws//modules/account-assignments | 1.1.1 | +| [sso\_account\_assignments\_root](#module\_sso\_account\_assignments\_root) | cloudposse/sso/aws//modules/account-assignments | 1.1.1 | +| [tfstate](#module\_tfstate) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources | Name | Type | |------|------| -| [aws_iam_policy_document.assume_identity_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_identitystore_group.manual](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/identitystore_group) | resource | +| [aws_iam_policy_document.assume_aws_team](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_policy_document.dns_administrator_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.eks_read_only](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.terraform_update_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | +| [aws_ssoadmin_instances.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssoadmin_instances) | data source | ## Inputs @@ -144,34 +251,37 @@ components: | [account\_assignments](#input\_account\_assignments) | Enables access to permission sets for users and groups in accounts, in the following structure:
yaml
:
groups:
:
permission_sets:
-
users:
:
permission_sets:
-
|
map(map(map(object({
permission_sets = list(string)
}
))))
| `{}` | no | | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [aws\_teams\_accessible](#input\_aws\_teams\_accessible) | List of IAM roles (e.g. ["admin", "terraform"]) for which to create permission
sets that allow the user to assume that role. Named like
admin -> IdentityAdminTeamAccess | `set(string)` | `[]` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [global\_environment\_name](#input\_global\_environment\_name) | Global environment name | `string` | `"gbl"` | no | -| [iam\_primary\_roles\_stage\_name](#input\_iam\_primary\_roles\_stage\_name) | The name of the stage where the IAM primary roles are provisioned | `string` | `"identity"` | no | +| [groups](#input\_groups) | List of AWS Identity Center Groups to be created with the AWS API.

When provisioning the Google Workspace Integration with AWS, Groups need to be created with API in order for automatic provisioning to work as intended. | `list(string)` | `[]` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [identity\_roles\_accessible](#input\_identity\_roles\_accessible) | List of IAM roles (e.g. ["admin", "terraform"]) for which to create permission
sets that allow the user to assume that role. Named like
admin -> IdentityAdminRoleAccess | `set(string)` | `[]` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | -| [privileged](#input\_privileged) | True if the default provider already has access to the backend | `bool` | `true` | no | +| [privileged](#input\_privileged) | True if the user running the Terraform command already has access to the Terraform backend | `bool` | `false` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | -| [root\_account\_stage\_name](#input\_root\_account\_stage\_name) | The name of the stage where `account_map` is provisioned | `string` | `"root"` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [tfstate\_environment\_name](#input\_tfstate\_environment\_name) | The name of the environment where `tfstate-backend` is provisioned. If not set, the TerraformUpdateAccess permission set will not be created. | `string` | `null` | no | ## Outputs -No outputs. +| Name | Description | +|------|-------------| +| [group\_ids](#output\_group\_ids) | Group IDs created for Identity Center | +| [permission\_sets](#output\_permission\_sets) | Permission sets | +| [sso\_account\_assignments](#output\_sso\_account\_assignments) | SSO account assignments | + ## References @@ -179,43 +289,43 @@ No outputs. [][40] -[1]: https://docs.aws.amazon.com/singlesignon/latest/userguide/permissionsetsconcept.html -[2]: #requirement%5C_terraform -[3]: #requirement%5C_aws -[4]: #requirement%5C_external -[5]: #requirement%5C_local -[6]: #requirement%5C_template -[7]: #requirement%5C_utils -[8]: #provider%5C_aws -[9]: #module%5C_account%5C_map -[10]: #module%5C_permission%5C_sets -[11]: #module%5C_role%5C_prefix -[12]: #module%5C_sso%5C_account%5C_assignments -[13]: #module%5C_this -[14]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document -[15]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document -[16]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document -[17]: #input%5C_account%5C_assignments -[18]: #input%5C_additional%5C_tag%5C_map -[19]: #input%5C_attributes -[20]: #input%5C_context -[21]: #input%5C_delimiter -[22]: #input%5C_enabled -[23]: #input%5C_environment -[24]: #input%5C_global%5C_environment%5C_name -[25]: #input%5C_iam%5C_primary%5C_roles%5C_stage%5C_name -[26]: #input%5C_id%5C_length%5C_limit -[27]: #input%5C_identity%5C_roles%5C_accessible -[28]: #input%5C_label%5C_key%5C_case -[29]: #input%5C_label%5C_order -[30]: #input%5C_label%5C_value%5C_case -[31]: #input%5C_name -[32]: #input%5C_namespace -[33]: #input%5C_privileged -[34]: #input%5C_regex%5C_replace%5C_chars -[35]: #input%5C_region -[36]: #input%5C_root%5C_account%5C_stage%5C_name -[37]: #input%5C_stage -[38]: #input%5C_tags -[39]: https://github.com/cloudposse/terraform-aws-sso -[40]: https://cpco.io/component +[1]: https://docs.aws.amazon.com/singlesignon/latest/userguide/permissionsetsconcept.html +[2]: #requirement%5C_terraform +[3]: #requirement%5C_aws +[4]: #requirement%5C_external +[5]: #requirement%5C_local +[6]: #requirement%5C_template +[7]: #requirement%5C_utils +[8]: #provider%5C_aws +[9]: #module%5C_account%5C_map +[10]: #module%5C_permission%5C_sets +[11]: #module%5C_role%5C_prefix +[12]: #module%5C_sso%5C_account%5C_assignments +[13]: #module%5C_this +[14]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document +[15]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document +[16]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document +[17]: #input%5C_account%5C_assignments +[18]: #input%5C_additional%5C_tag%5C_map +[19]: #input%5C_attributes +[20]: #input%5C_context +[21]: #input%5C_delimiter +[22]: #input%5C_enabled +[23]: #input%5C_environment +[24]: 
#input%5C_global%5C_environment%5C_name +[25]: #input%5C_iam%5C_primary%5C_roles%5C_stage%5C_name +[26]: #input%5C_id%5C_length%5C_limit +[27]: #input%5C_identity%5C_roles%5C_accessible +[28]: #input%5C_label%5C_key%5C_case +[29]: #input%5C_label%5C_order +[30]: #input%5C_label%5C_value%5C_case +[31]: #input%5C_name +[32]: #input%5C_namespace +[33]: #input%5C_privileged +[34]: #input%5C_regex%5C_replace%5C_chars +[35]: #input%5C_region +[36]: #input%5C_root%5C_account%5C_stage%5C_name +[37]: #input%5C_stage +[38]: #input%5C_tags +[39]: https://github.com/cloudposse/terraform-aws-sso +[40]: https://cpco.io/component diff --git a/modules/aws-sso/additional-permission-sets.tf b/modules/aws-sso/additional-permission-sets.tf new file mode 100644 index 000000000..528a089ae --- /dev/null +++ b/modules/aws-sso/additional-permission-sets.tf @@ -0,0 +1,10 @@ +locals { + # If you have custom permission sets, override this declaration by creating + # a file called `additional-permission-sets_override.tf`. + # Then add the custom permission sets to the overridable_additional_permission_sets in that file. + # See the README for more details. + overridable_additional_permission_sets = [ + # Example + # local.audit_manager_permission_set, + ] +} diff --git a/modules/aws-sso/default.auto.tfvars b/modules/aws-sso/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/aws-sso/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/aws-sso/main.tf b/modules/aws-sso/main.tf index ec0775976..55d82ba3d 100644 --- a/modules/aws-sso/main.tf +++ b/modules/aws-sso/main.tf @@ -1,44 +1,9 @@ -module "permission_sets" { - source = "cloudposse/sso/aws//modules/permission-sets" - version = "0.6.2" - - permission_sets = concat( - local.administrator_access_permission_set, - local.billing_administrator_access_permission_set, - local.billing_read_only_access_permission_set, - local.dns_administrator_access_permission_set, - local.identity_access_permission_sets, - local.poweruser_access_permission_set, - local.read_only_access_permission_set, - ) - - context = module.this.context -} - -module "sso_account_assignments" { - source = "cloudposse/sso/aws//modules/account-assignments" - version = "0.6.2" - - account_assignments = local.account_assignments - context = module.this.context -} - -module "sso_account_assignments_root" { - source = "cloudposse/sso/aws//modules/account-assignments" - version = "0.6.2" - - providers = { - aws = aws.root - } - - account_assignments = local.account_assignments_root - context = module.this.context -} - locals { enabled = module.this.enabled - account_map = module.account_map.outputs.full_account_map + account_map = module.account_map.outputs.full_account_map + root_account = local.account_map[module.account_map.outputs.root_account_account_name] + account_assignments_groups = flatten([ for account_key, account in var.account_assignments : [ for principal_key, principal in account.groups : [ @@ -57,12 +22,12 @@ locals { account_assignments_groups_no_root = [ for val in local.account_assignments_groups : val - if val.account != local.account_map["root"] + if val.account != local.root_account ] account_assignments_groups_only_root = [ for val in local.account_assignments_groups : val - if val.account == local.account_map["root"] + if val.account == local.root_account ] account_assignments_users = flatten([ for account_key, account in var.account_assignments : [ @@ -81,12 
+46,12 @@ locals { account_assignments_users_no_root = [ for val in local.account_assignments_users : val - if val.account != local.account_map["root"] + if val.account != local.root_account ] account_assignments_users_only_root = [ for val in local.account_assignments_users : val - if val.account == local.account_map["root"] + if val.account == local.root_account ] account_assignments = concat(local.account_assignments_groups_no_root, local.account_assignments_users_no_root) @@ -95,4 +60,66 @@ locals { aws_partition = data.aws_partition.current.partition } +data "aws_ssoadmin_instances" "this" {} + data "aws_partition" "current" {} + +resource "aws_identitystore_group" "manual" { + for_each = toset(var.groups) + + display_name = each.key + description = "Group created with Terraform" + + identity_store_id = tolist(data.aws_ssoadmin_instances.this.identity_store_ids)[0] +} + +module "permission_sets" { + source = "cloudposse/sso/aws//modules/permission-sets" + version = "1.1.1" + + permission_sets = concat( + local.overridable_additional_permission_sets, + local.administrator_access_permission_set, + local.billing_administrator_access_permission_set, + local.billing_read_only_access_permission_set, + local.dns_administrator_access_permission_set, + local.identity_access_permission_sets, + local.poweruser_access_permission_set, + local.read_only_access_permission_set, + local.terraform_update_access_permission_set, + ) + + context = module.this.context + + depends_on = [ + aws_identitystore_group.manual + ] +} + +module "sso_account_assignments" { + source = "cloudposse/sso/aws//modules/account-assignments" + version = "1.1.1" + + account_assignments = local.account_assignments + context = module.this.context + + depends_on = [ + aws_identitystore_group.manual + ] +} + +module "sso_account_assignments_root" { + source = "cloudposse/sso/aws//modules/account-assignments" + version = "1.1.1" + + providers = { + aws = aws.root + } + + account_assignments = local.account_assignments_root + context = module.this.context + + depends_on = [ + aws_identitystore_group.manual + ] +} diff --git a/modules/aws-sso/outputs.tf b/modules/aws-sso/outputs.tf new file mode 100644 index 000000000..e0c154510 --- /dev/null +++ b/modules/aws-sso/outputs.tf @@ -0,0 +1,14 @@ +output "permission_sets" { + value = module.permission_sets.permission_sets + description = "Permission sets" +} + +output "sso_account_assignments" { + value = module.sso_account_assignments.assignments + description = "SSO account assignments" +} + +output "group_ids" { + value = { for group_key, group_output in aws_identitystore_group.manual : group_key => group_output.group_id } + description = "Group IDs created for Identity Center" +} diff --git a/modules/aws-sso/policy-AdminstratorAccess.tf b/modules/aws-sso/policy-AdminstratorAccess.tf index c88ad4890..afd5463ae 100644 --- a/modules/aws-sso/policy-AdminstratorAccess.tf +++ b/modules/aws-sso/policy-AdminstratorAccess.tf @@ -1,11 +1,12 @@ locals { administrator_access_permission_set = [{ - name = "AdministratorAccess", - description = "Allow Full Admininstrator access to the account", - relay_state = "", - session_duration = "", - tags = {}, - inline_policy = "" - policy_attachments = ["arn:${local.aws_partition}:iam::aws:policy/AdministratorAccess"] + name = "AdministratorAccess", + description = "Allow Full Administrator access to the account", + relay_state = "", + session_duration = "", + tags = {}, + inline_policy = "" + policy_attachments = 
["arn:${local.aws_partition}:iam::aws:policy/AdministratorAccess"] + customer_managed_policy_attachments = [] }] } diff --git a/modules/aws-sso/policy-BillingAdministratorAccess.tf b/modules/aws-sso/policy-BillingAdministratorAccess.tf index 417d72f79..0e854f192 100644 --- a/modules/aws-sso/policy-BillingAdministratorAccess.tf +++ b/modules/aws-sso/policy-BillingAdministratorAccess.tf @@ -10,5 +10,6 @@ locals { "arn:${local.aws_partition}:iam::aws:policy/job-function/Billing", "arn:${local.aws_partition}:iam::aws:policy/AWSSupportAccess", ] + customer_managed_policy_attachments = [] }] } diff --git a/modules/aws-sso/policy-BillingReadOnlyAccess.tf b/modules/aws-sso/policy-BillingReadOnlyAccess.tf index e9dced8aa..732e90aa6 100644 --- a/modules/aws-sso/policy-BillingReadOnlyAccess.tf +++ b/modules/aws-sso/policy-BillingReadOnlyAccess.tf @@ -10,5 +10,6 @@ locals { "arn:${local.aws_partition}:iam::aws:policy/AWSBillingReadOnlyAccess", "arn:${local.aws_partition}:iam::aws:policy/AWSSupportAccess", ] + customer_managed_policy_attachments = [] }] } diff --git a/modules/aws-sso/policy-DNSAdministratorAccess.tf b/modules/aws-sso/policy-DNSAdministratorAccess.tf index c3d810f48..2cb507ef8 100644 --- a/modules/aws-sso/policy-DNSAdministratorAccess.tf +++ b/modules/aws-sso/policy-DNSAdministratorAccess.tf @@ -27,12 +27,13 @@ data "aws_iam_policy_document" "dns_administrator_access" { locals { dns_administrator_access_permission_set = [{ - name = "DNSRecordAdministratorAccess", - description = "Allow DNS Record Admininstrator access to the account, but not zone administration", - relay_state = "https://console.aws.amazon.com/route53/", - session_duration = "", - tags = {}, - inline_policy = data.aws_iam_policy_document.dns_administrator_access.json, - policy_attachments = ["arn:${local.aws_partition}:iam::aws:policy/AWSSupportAccess"] + name = "DNSRecordAdministratorAccess", + description = "Allow DNS Record Administrator access to the account, but not zone administration", + relay_state = "https://console.aws.amazon.com/route53/", + session_duration = "", + tags = {}, + inline_policy = data.aws_iam_policy_document.dns_administrator_access.json, + policy_attachments = ["arn:${local.aws_partition}:iam::aws:policy/AWSSupportAccess"] + customer_managed_policy_attachments = [] }] } diff --git a/modules/aws-sso/policy-Identity-role-RoleAccess.tf b/modules/aws-sso/policy-Identity-role-RoleAccess.tf deleted file mode 100644 index 1a2174a36..000000000 --- a/modules/aws-sso/policy-Identity-role-RoleAccess.tf +++ /dev/null @@ -1,65 +0,0 @@ - -# This file generates a permission set for each role specified in var.target_identity_roles -# which is named "IdentityRoleAccess" and grants access to only that role, -# plus ViewOnly access because it is difficult to navigate without any access at all. - -locals { - identity_account = module.account_map.outputs.full_account_map[var.iam_primary_roles_stage_name] -} - -module "role_prefix" { - source = "cloudposse/label/null" - version = "0.25.0" - - stage = var.iam_primary_roles_stage_name - - context = module.this.context -} - -data "aws_iam_policy_document" "assume_identity_role" { - for_each = local.enabled ? 
var.identity_roles_accessible : [] - - statement { - sid = "RoleAssumeRole" - - effect = "Allow" - actions = [ - "sts:AssumeRole", - "sts:TagSession", - ] - - resources = [ - format("arn:${local.aws_partition}:iam::%s:role/%s-%s", local.identity_account, module.role_prefix.id, each.value) - ] - - /* For future reference, this tag-based restriction also works, based on - the fact that we always tag our IAM roles with the "Name" tag. - This could be used to control access based on some other tag, like "Category", - so is left here as an example. - - condition { - test = "ForAllValues:StringEquals" - variable = "iam:ResourceTag/Name" # "Name" is the Tag Key - values = [format("%s-%s", module.role_prefix.id, each.value)] - } - resources = [ - # This allows/restricts access to only IAM roles, not users or SSO roles - format("arn:aws:iam::%s:role/*", local.identity_account) - ] - - */ - - } -} - -locals { - identity_access_permission_sets = [for role in var.identity_roles_accessible : { - name = format("Identity%sRoleAccess", title(role)), - description = "Allow user to assume %s role in Identity account, which allows access to other accounts", - relay_state = "", - session_duration = "", - tags = {}, - inline_policy = data.aws_iam_policy_document.assume_identity_role[role].json - policy_attachments = ["arn:${local.aws_partition}:iam::aws:policy/job-function/ViewOnlyAccess"] - }] -} diff --git a/modules/aws-sso/policy-Identity-role-TeamAccess.tf b/modules/aws-sso/policy-Identity-role-TeamAccess.tf new file mode 100644 index 000000000..371b293a3 --- /dev/null +++ b/modules/aws-sso/policy-Identity-role-TeamAccess.tf @@ -0,0 +1,61 @@ + +# This file generates a permission set for each role specified in var.target_identity_roles +# which is named "IdentityTeamAccess" and grants access to only that role, +# plus ViewOnly access because it is difficult to navigate without any access at all. + +data "aws_iam_policy_document" "assume_aws_team" { + for_each = local.enabled ? var.aws_teams_accessible : [] + + statement { + sid = "RoleAssumeRole" + + effect = "Allow" + actions = [ + "sts:AssumeRole", + "sts:SetSourceIdentity", + "sts:TagSession", + ] + + resources = ["*"] + + /* For future reference, this tag-based restriction also works, based on + the fact that we always tag our IAM roles with the "Name" tag. + This could be used to control access based on some other tag, like "Category", + so is left here as an example. 
+ + condition { + test = "ForAllValues:StringEquals" + variable = "iam:ResourceTag/Name" # "Name" is the Tag Key + values = [format("%s-%s", module.role_prefix.id, each.value)] + } + resources = [ + # This allows/restricts access to only IAM roles, not users or SSO roles + format("arn:aws:iam::%s:role/*", local.identity_account) + ] + + */ + + } +} + +module "role_map" { + source = "../account-map/modules/roles-to-principals" + + teams = var.aws_teams_accessible + privileged = var.privileged + + context = module.this.context +} + +locals { + identity_access_permission_sets = [for role in var.aws_teams_accessible : { + name = module.role_map.team_permission_set_name_map[role], + description = format("Allow user to assume the %s Team role in the Identity account, which allows access to other accounts", replace(title(role), "-", "")) + relay_state = "", + session_duration = "", + tags = {}, + inline_policy = data.aws_iam_policy_document.assume_aws_team[role].json + policy_attachments = ["arn:${local.aws_partition}:iam::aws:policy/job-function/ViewOnlyAccess"] + customer_managed_policy_attachments = [] + }] +} diff --git a/modules/aws-sso/policy-PoweruserAccess.tf b/modules/aws-sso/policy-PoweruserAccess.tf index 58374a961..8b21d0cf9 100644 --- a/modules/aws-sso/policy-PoweruserAccess.tf +++ b/modules/aws-sso/policy-PoweruserAccess.tf @@ -10,5 +10,6 @@ locals { "arn:${local.aws_partition}:iam::aws:policy/PowerUserAccess", "arn:${local.aws_partition}:iam::aws:policy/AWSSupportAccess", ] + customer_managed_policy_attachments = [] }] } diff --git a/modules/aws-sso/policy-ReadOnlyAccess.tf b/modules/aws-sso/policy-ReadOnlyAccess.tf index cc03f8499..88660787d 100644 --- a/modules/aws-sso/policy-ReadOnlyAccess.tf +++ b/modules/aws-sso/policy-ReadOnlyAccess.tf @@ -5,10 +5,27 @@ locals { relay_state = "", session_duration = "", tags = {}, - inline_policy = "" + inline_policy = data.aws_iam_policy_document.eks_read_only.json, policy_attachments = [ "arn:${local.aws_partition}:iam::aws:policy/ReadOnlyAccess", - "arn:${local.aws_partition}:iam::aws:policy/AWSSupportAccess", + "arn:${local.aws_partition}:iam::aws:policy/AWSSupportAccess" ] + customer_managed_policy_attachments = [] }] } + +data "aws_iam_policy_document" "eks_read_only" { + statement { + sid = "AllowEKSView" + effect = "Allow" + actions = [ + "eks:Get*", + "eks:Describe*", + "eks:List*", + "eks:Access*" + ] + resources = [ + "*" + ] + } +} diff --git a/modules/aws-sso/policy-TerraformUpdateAccess.tf b/modules/aws-sso/policy-TerraformUpdateAccess.tf new file mode 100644 index 000000000..095d64d5e --- /dev/null +++ b/modules/aws-sso/policy-TerraformUpdateAccess.tf @@ -0,0 +1,56 @@ +variable "tfstate_environment_name" { + type = string + description = "The name of the environment where `tfstate-backend` is provisioned. If not set, the TerraformUpdateAccess permission set will not be created." + default = null +} + +locals { + tf_update_access_enabled = var.tfstate_environment_name != null && module.this.enabled +} + +module "tfstate" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + bypass = !local.tf_update_access_enabled + + component = "tfstate-backend" + environment = var.tfstate_environment_name + stage = module.iam_roles.global_stage_name + privileged = var.privileged + + context = module.this.context +} + +data "aws_iam_policy_document" "terraform_update_access" { + count = local.tf_update_access_enabled ? 
1 : 0 + + statement { + sid = "TerraformStateBackendS3Bucket" + effect = "Allow" + actions = ["s3:ListBucket", "s3:GetObject", "s3:PutObject"] + resources = module.this.enabled ? [ + module.tfstate.outputs.tfstate_backend_s3_bucket_arn, + "${module.tfstate.outputs.tfstate_backend_s3_bucket_arn}/*" + ] : [] + } + statement { + sid = "TerraformStateBackendDynamoDbTable" + effect = "Allow" + actions = ["dynamodb:GetItem", "dynamodb:PutItem", "dynamodb:DeleteItem"] + resources = module.this.enabled ? [module.tfstate.outputs.tfstate_backend_dynamodb_table_arn] : [] + } +} + +locals { + terraform_update_access_permission_set = local.tf_update_access_enabled ? [{ + name = "TerraformUpdateAccess", + description = "Allow access to Terraform state sufficient to make changes", + relay_state = "", + session_duration = "PT1H", # One hour, maximum allowed for chained assumed roles + tags = {}, + inline_policy = one(data.aws_iam_policy_document.terraform_update_access[*].json), + policy_attachments = [] + customer_managed_policy_attachments = [] + }] : [] +} diff --git a/modules/aws-sso/providers.tf b/modules/aws-sso/providers.tf index 9188ea7dc..fb0b204f5 100644 --- a/modules/aws-sso/providers.tf +++ b/modules/aws-sso/providers.tf @@ -1,27 +1,75 @@ +# This component is unusual in that part of it must be deployed to the `root` +# account. You have the option of where to deploy the remaining part, and +# Cloud Posse recommends you deploy it also to the `root` account, however +# it can be deployed to the `identity` account instead. In the discussion +# below, when we talk about where this module is being deployed, we are +# referring to the part of the module that is not deployed to the `root` +# account and is configured by setting `stage` etc.. + +# If you have Dynamic Terraform Roles enabled, leave the backend `role_arn` at +# its default value. If deploying only to the `root` account, leave `privileged: false` +# and use either SuperAdmin or an appropriate `aws-team` (such as `managers`). +# If deploying to the `identity` account, set `privileged: true` +# and use SuperAdmin or any other role in the `root` account with Admin access. +# +# For those not using dynamic Terraform roles: +# +# Set the stack configuration for this component to set `privileged: true` +# and backend `role_arn` to `null`, and deploy it using either the SuperAdmin +# role or any other role in the `root` account with Admin access. +# +# If you are deploying this to the "identity" account and have a team empowered +# to deploy to both the "identity" and "root" accounts, then you have the option to set +# `privileged: false` and leave the backend `role_arn` at its default value, but +# then SuperAdmin will not be able to deploy this component, +# only the team with access to both accounts will be able to deploy it. +# + provider "aws" { region = var.region + profile = !var.privileged && module.iam_roles.profiles_enabled ? module.iam_roles.terraform_profile_name : null dynamic "assume_role" { - for_each = module.iam_roles.org_role_arn != null ? [true] : [] + for_each = !var.privileged && module.iam_roles.profiles_enabled ? [] : ( + var.privileged ? 
compact([module.iam_roles.org_role_arn]) : compact([module.iam_roles.terraform_role_arn]) + ) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.org_role_arn) + role_arn = assume_role.value } } } + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + privileged = var.privileged + + context = module.this.context +} + provider "aws" { alias = "root" region = var.region -} -module "iam_roles" { - source = "../account-map/modules/iam-roles" - privileged = true - context = module.this.context + profile = !var.privileged && module.iam_roles_root.profiles_enabled ? module.iam_roles_root.terraform_profile_name : null + dynamic "assume_role" { + for_each = !var.privileged && module.iam_roles_root.profiles_enabled ? [] : ( + var.privileged ? compact([module.iam_roles_root.org_role_arn]) : compact([module.iam_roles_root.terraform_role_arn]) + ) + content { + role_arn = assume_role.value + } + } } -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" + +module "iam_roles_root" { + source = "../account-map/modules/iam-roles" + + privileged = var.privileged + tenant = module.iam_roles.global_tenant_name + stage = module.iam_roles.global_stage_name + environment = module.iam_roles.global_environment_name + + context = module.this.context } diff --git a/modules/aws-sso/remote-state.tf b/modules/aws-sso/remote-state.tf index 6e8b0215f..3e818ad3f 100644 --- a/modules/aws-sso/remote-state.tf +++ b/modules/aws-sso/remote-state.tf @@ -1,10 +1,11 @@ module "account_map" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.3" + version = "1.5.0" component = "account-map" - environment = var.global_environment_name - stage = var.root_account_stage_name + environment = module.iam_roles.global_environment_name + stage = module.iam_roles.global_stage_name + tenant = module.iam_roles.global_tenant_name privileged = var.privileged context = module.this.context diff --git a/modules/aws-sso/variables.tf b/modules/aws-sso/variables.tf index 02001bb74..b8b41210e 100644 --- a/modules/aws-sso/variables.tf +++ b/modules/aws-sso/variables.tf @@ -3,21 +3,10 @@ variable "region" { description = "AWS Region" } -variable "global_environment_name" { - type = string - description = "Global environment name" - default = "gbl" -} -variable "root_account_stage_name" { - type = string - description = "The name of the stage where `account_map` is provisioned" - default = "root" -} - variable "privileged" { type = bool - description = "True if the default provider already has access to the backend" - default = true + description = "True if the user running the Terraform command already has access to the Terraform backend" + default = false } variable "account_assignments" { @@ -44,18 +33,22 @@ variable "account_assignments" { default = {} } -variable "iam_primary_roles_stage_name" { - type = string - description = "The name of the stage where the IAM primary roles are provisioned" - default = "identity" -} - -variable "identity_roles_accessible" { +variable "aws_teams_accessible" { type = set(string) description = <<-EOT List of IAM roles (e.g. ["admin", "terraform"]) for which to create permission sets that allow the user to assume that role. Named like - admin -> IdentityAdminRoleAccess + admin -> IdentityAdminTeamAccess + EOT + default = [] +} + +variable "groups" { + type = list(string) + description = <<-EOT + List of AWS Identity Center Groups to be created with the AWS API. 
+ + When provisioning the Google Workspace Integration with AWS, Groups need to be created with API in order for automatic provisioning to work as intended. EOT default = [] } diff --git a/modules/aws-sso/versions.tf b/modules/aws-sso/versions.tf index e89eb16ed..f33ede77f 100644 --- a/modules/aws-sso/versions.tf +++ b/modules/aws-sso/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } } } diff --git a/modules/aws-ssosync/README.md b/modules/aws-ssosync/README.md new file mode 100644 index 000000000..c31ab93ce --- /dev/null +++ b/modules/aws-ssosync/README.md @@ -0,0 +1,288 @@ +--- +tags: + - component/aws-ssosync + - layer/identity + - provider/aws +--- + +# Component: `aws-ssosync` + +Deploys [AWS ssosync](https://github.com/awslabs/ssosync) to sync Google Groups with AWS SSO. + +AWS `ssosync` is a Lambda application that regularly manages Identity Store users. + +This component requires manual deployment by a privileged user because it deploys a role in the root or identity +management account. + +## Usage + +You should be able to deploy the `aws-ssosync` component to the same account as `aws-sso`. Typically that is the +`core-gbl-root` or `gbl-root` stack. + +**Stack Level**: Global **Deployment**: Must be deployed by `managers` or SuperAdmin using `atmos` CLI + +The following is an example snippet for how to use this component: + +(`stacks/catalog/aws-ssosync.yaml`) + +```yaml +components: + terraform: + aws-ssosync: + vars: + enabled: true + name: aws-ssosync + google_admin_email: an-actual-admin@acme.com + ssosync_url_prefix: "https://github.com/Benbentwo/ssosync/releases/download" + ssosync_version: "2.0.2" + google_credentials_ssm_path: "/ssosync" + log_format: text + log_level: debug + schedule_expression: "rate(15 minutes)" +``` + +We recommend following a similar process to what the [AWS ssosync](https://github.com/awslabs/ssosync) documentation +recommends. + +### Deployment + +Overview of steps: + +1. Configure AWS IAM Identity Center +1. Configure Google Cloud console +1. Configure Google Admin console +1. Deploy the `aws-ssosync` component +1. Deploy the `aws-sso` component + +#### 1. Configure AWS IAM Identity Center (AWS SSO) + +Follow +[AWS documentation to configure SAML and SCIM with Google Workspace and IAM Identity Center](https://docs.aws.amazon.com/singlesignon/latest/userguide/gs-gwp.html). + +As part of this process, save the SCIM endpoint token and URL. Then in AWS SSM Parameter Store, create two +`SecureString` parameters in the same account used for AWS SSO. This is usually the root account in the primary region. + +``` +/ssosync/scim_endpoint_access_token +/ssosync/scim_endpoint_url +``` + +One more parameter you'll need is your Identity Store ID. To obtain your Identity Store ID, go to the AWS Identity +Center console and select `Settings`. Under the `Identity Source` section, copy the Identity Store ID. In the same +account used for AWS SSO, create the following parameter: + +``` +/ssosync/identity_store_id +``` + +#### 2. Configure Google Cloud console + +Within the Google Cloud console, we need to create a new Google Project and Service Account and enable the Admin SDK +API. Follow these steps: + +1. Open the Google Cloud
console: https://console.cloud.google.com +2. Create a new project. Give the project a descriptive name such as `AWS SSO Sync` +3. Enable Admin SDK in APIs: `APIs & Services > Enabled APIs & Services > + ENABLE APIS AND SERVICES` + +![Enable Admin SDK](https://raw.githubusercontent.com/cloudposse/terraform-aws-components/main/modules/aws-ssosync/docs/img/admin_sdk.png) # +use raw URL so that this works in both GitHub and docusaurus + +4. Create Service Account: `IAM & Admin > Service Accounts > Create Service Account` + [(ref)](https://cloud.google.com/iam/docs/service-accounts-create). + +![Create Service Account](https://raw.githubusercontent.com/cloudposse/terraform-aws-components/main/modules/aws-ssosync/docs/img/create_service_account.png) # +use raw URL so that this works in both GitHub and docusaurus + +5. Download credentials for the new Service Account: + `IAM & Admin > Service Accounts > select Service Account > Keys > ADD KEY > Create new key > JSON` + +![Download Credentials](https://raw.githubusercontent.com/cloudposse/terraform-aws-components/main/modules/aws-ssosync/docs/img/dl_service_account_creds.png) # +use raw URL so that this works in both GitHub and docusaurus + +6. Save the JSON credentials as a new `SecureString` AWS SSM parameter in the same account used for AWS SSO. Use the + full JSON string as the value for the parameter. + +``` +/ssosync/google_credentials +``` + +#### 3. Configure Google Admin console + +- Open the Google Admin console +- From your domain’s Admin console, go to `Main menu > Security > Access and data control > API controls` + [(ref)](https://developers.google.com/cloud-search/docs/guides/delegation) +- In the Domain wide delegation pane, select `Manage Domain Wide Delegation`. +- Click `Add new`. +- In the Client ID field, enter the client ID obtained from the service account creation steps above. +- In the OAuth Scopes field, enter a comma-delimited list of the scopes required for your application. Use the scope + `https://www.googleapis.com/auth/cloud_search.query` for search applications using the Query API. +- Add the following permissions: [(ref)](https://github.com/awslabs/ssosync?tab=readme-ov-file#google) + +```console +https://www.googleapis.com/auth/admin.directory.group.readonly +https://www.googleapis.com/auth/admin.directory.group.member.readonly +https://www.googleapis.com/auth/admin.directory.user.readonly +``` + +#### 4. Deploy the `aws-ssosync` component + +Make sure that all four of the following SSM parameters exist in the target account and region: + +- `/ssosync/scim_endpoint_url` +- `/ssosync/scim_endpoint_access_token` +- `/ssosync/identity_store_id` +- `/ssosync/google_credentials` + +If deployed successfully, Groups and Users should be programmatically copied from the Google Workspace into AWS IAM +Identity Center on the given schedule. + +If these Groups are not showing up, check the CloudWatch logs for the new Lambda function and refer to the [FAQs](#FAQ) +included below. + +#### 5. Deploy the `aws-sso` component + +Use the names of the Groups now provisioned programmatically in the `aws-sso` component catalog. Follow the +[aws-sso](../aws-sso/) component documentation to deploy the `aws-sso` component. + +### FAQ + +#### Why is the tool forked by `Benbentwo`? + +The `awslabs` tool requires AWS Secrets Manager for the Google Credentials. However, we would prefer to use AWS SSM to +store all credentials consistently and not require AWS Secrets Manager.
Therefore we've created a Pull Request and will +point to a fork until the PR is merged. + +Ref: + +- https://github.com/awslabs/ssosync/pull/133 +- https://github.com/awslabs/ssosync/issues/93 + +#### What should I use for the Google Admin Email Address? + +The Service Account created will assume the User given by `--google-admin` / `SSOSYNC_GOOGLE_ADMIN` / +`var.google_admin_email`. Therefore, this user email must be a valid Google admin user in your organization. + +This is not the same email as the Service Account. + +If Google fails to query Groups, you may see the following error: + +```console +Notifying Lambda and mark this execution as Failure: googleapi: Error 404: Domain not found., notFound +``` + +#### Common Group Name Query Error + +If filtering group names using query strings, make sure the provided string is valid. For example, +`google_group_match: "name:aws*"` is incorrect. Instead use `google_group_match: "Name:aws*"` + +If not, you may again see the same error message: + +```console +Notifying Lambda and mark this execution as Failure: googleapi: Error 404: Domain not found., notFound +``` + +Ref: + +> The specific error you are seeing is because the google api doesn't like the query string you provided for the -g +> parameter. try -g "Name:Fuel\*" + +https://github.com/awslabs/ssosync/issues/91 + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [archive](#requirement\_archive) | >= 2.3.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [null](#requirement\_null) | >= 3.0 | + +## Providers + +| Name | Version | +|------|---------| +| [archive](#provider\_archive) | >= 2.3.0 | +| [aws](#provider\_aws) | >= 4.0 | +| [null](#provider\_null) | >= 3.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [ssosync\_artifact](#module\_ssosync\_artifact) | cloudposse/module-artifact/external | 0.8.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_cloudwatch_event_rule.ssosync](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource | +| [aws_cloudwatch_event_target.ssosync](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource | +| [aws_iam_role.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_lambda_function.ssosync](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function) | resource | +| [aws_lambda_permission.allow_cloudwatch_execution](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_permission) | resource | +| [null_resource.extract_my_tgz](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [archive_file.lambda](https://registry.terraform.io/providers/hashicorp/archive/latest/docs/data-sources/file) | data source | +| [aws_iam_policy_document.ssosync_lambda_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.ssosync_lambda_identity_center](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_ssm_parameter.google_credentials](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| 
[aws_ssm_parameter.identity_store_id](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.scim_endpoint_access_token](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.scim_endpoint_url](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [architecture](#input\_architecture) | Architecture of the Lambda function | `string` | `"x86_64"` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [google\_admin\_email](#input\_google\_admin\_email) | Google Admin email | `string` | n/a | yes | +| [google\_credentials\_ssm\_path](#input\_google\_credentials\_ssm\_path) | SSM Path for `ssosync` secrets | `string` | `"/ssosync"` | no | +| [google\_group\_match](#input\_google\_group\_match) | Google Workspace group filter query parameter, example: 'name:Admin* email:aws-*', see: https://developers.google.com/admin-sdk/directory/v1/guides/search-groups | `string` | `""` | no | +| [google\_user\_match](#input\_google\_user\_match) | Google Workspace user filter query parameter, example: 'name:John* email:admin*', see: https://developers.google.com/admin-sdk/directory/v1/guides/search-users | `string` | `""` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [ignore\_groups](#input\_ignore\_groups) | Ignore these Google Workspace groups | `string` | `""` | no | +| [ignore\_users](#input\_ignore\_users) | Ignore these Google Workspace users | `string` | `""` | no | +| [include\_groups](#input\_include\_groups) | Include only these Google Workspace groups. (Only applicable for sync\_method user\_groups) | `string` | `""` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [log\_format](#input\_log\_format) | Log format for Lambda function logging | `string` | `"json"` | no | +| [log\_level](#input\_log\_level) | Log level for Lambda function logging | `string` | `"warn"` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region where AWS SSO is enabled | `string` | n/a | yes | +| [schedule\_expression](#input\_schedule\_expression) | Schedule for trigger the execution of ssosync (see CloudWatch schedule expressions) | `string` | `"rate(15 minutes)"` | no | +| [ssosync\_url\_prefix](#input\_ssosync\_url\_prefix) | URL prefix for ssosync binary | `string` | `"https://github.com/Benbentwo/ssosync/releases/download"` | no | +| [ssosync\_version](#input\_ssosync\_version) | Version of ssosync to use | `string` | `"v2.0.2"` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [sync\_method](#input\_sync\_method) | Sync method to use | `string` | `"groups"` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [arn](#output\_arn) | ARN of the lambda function | +| [invoke\_arn](#output\_invoke\_arn) | Invoke ARN of the lambda function | +| [qualified\_arn](#output\_qualified\_arn) | ARN identifying your Lambda Function Version (if versioning is enabled via publish = true) | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/aws-ssosync) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/aws-ssosync/context.tf b/modules/aws-ssosync/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/aws-ssosync/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/aws-ssosync/dist/README.md b/modules/aws-ssosync/dist/README.md new file mode 100644 index 000000000..82c426e7c --- /dev/null +++ b/modules/aws-ssosync/dist/README.md @@ -0,0 +1,211 @@ +# Fork of AWS SSO Sync +Removes need for ASM Secrets + + +# SSO Sync + +![Github Action](https://github.com/awslabs/ssosync/workflows/main/badge.svg) +![gopherbadger-tag-do-not-edit](https://img.shields.io/badge/Go%20Coverage-42%25-brightgreen.svg?longCache=true&style=flat) +[![Go Report Card](https://goreportcard.com/badge/github.com/awslabs/ssosync)](https://goreportcard.com/report/github.com/awslabs/ssosync) +[![License Apache 2](https://img.shields.io/badge/License-Apache2-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0) +[![Taylor Swift](https://img.shields.io/badge/secured%20by-taylor%20swift-brightgreen.svg)](https://twitter.com/SwiftOnSecurity) + +> Helping you populate AWS SSO directly with your Google Apps users + +SSO Sync will run on any platform that Go can build for. It is available in the [AWS Serverless Application Repository](https://console.aws.amazon.com/lambda/home#/create/app?applicationId=arn:aws:serverlessrepo:us-east-2:004480582608:applications/SSOSync) + +> :warning: there are breaking changes for versions `>= 0.02` + +> :warning: `>= 1.0.0-rc.5` groups to do not get deleted in AWS SSO when deleted in the Google Directory, and groups are synced by their email address + +> :warning: `>= 2.0.0` this makes use of the **Identity Store API** which means: +* if deploying the lambda from the [AWS Serverless Application Repository](https://console.aws.amazon.com/lambda/home#/create/app?applicationId=arn:aws:serverlessrepo:us-east-2:004480582608:applications/SSOSync) then it needs to be deployed into the [IAM Identity Center delegated administration](https://docs.aws.amazon.com/singlesignon/latest/userguide/delegated-admin.html) account. Technically you could deploy in the management account but we would recommend against this. 
+* if you are running the project as a CLI tool, then the environment will need to use the credentials of a user in the [IAM Identity Center delegated administration](https://docs.aws.amazon.com/singlesignon/latest/userguide/delegated-admin.html) account, with appropriate permissions. + +## Why? + +As per the [AWS SSO](https://aws.amazon.com/single-sign-on/) Homepage: + +> AWS Single Sign-On (SSO) makes it easy to centrally manage access +> to multiple AWS accounts and business applications and provide users +> with single sign-on access to all their assigned accounts and applications +> from one place. + +Key part further down: + +> With AWS SSO, you can create and manage user identities in AWS SSO’s +> identity store, or easily connect to your existing identity source including +> Microsoft Active Directory and **Azure Active Directory (Azure AD)**. + +AWS SSO can use other Identity Providers as well... such as Google Apps for Domains. Although AWS SSO +supports a subset of the SCIM protocol for populating users, it currently only has support for Azure AD. + +This project provides a CLI tool to pull users and groups from Google and push them into AWS SSO. +`ssosync` deals with removing users as well. The heavily commented code provides you with the detail of +what it is going to do. + +### References + + * [SCIM Protocol RFC](https://tools.ietf.org/html/rfc7644) + * [AWS SSO - Connect to Your External Identity Provider](https://docs.aws.amazon.com/singlesignon/latest/userguide/manage-your-identity-source-idp.html) + * [AWS SSO - Automatic Provisioning](https://docs.aws.amazon.com/singlesignon/latest/userguide/provision-automatically.html) + * [AWS IAM Identity Center - Identity Store API](https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/welcome.html) + +## Installation + +The recommended installation is: +* [Setup IAM Identity Center](https://docs.aws.amazon.com/singlesignon/latest/userguide/get-started-enable-identity-center.html), in the management account of your organization +* Create a linked `Identity` account from which to manage IAM Identity Center +* [Delegate administration](https://docs.aws.amazon.com/singlesignon/latest/userguide/delegated-admin.html) to the `Identity` account +* Deploy the [SSOSync app](https://console.aws.amazon.com/lambda/home#/create/app?applicationId=arn:aws:serverlessrepo:us-east-2:004480582608:applications/SSOSync) from the AWS Serverless Application Repository + +Alternatively, you can `go get github.com/awslabs/ssosync` or grab a release binary from the releases page. The binary +can be used from your local computer, or you can deploy it to AWS Lambda to run on a CloudWatch Event +for regular synchronization. + +## Configuration + +You need a few items of configuration. One set comes from AWS and the other +from Google Cloud, to allow API access to each. You should have configured +Google as your Identity Provider for AWS SSO already. + +You will need the files produced by these steps for AWS Lambda deployment as well +as for locally running the ssosync tool. + +### Google + +First, you have to set up your API. In the project you want to use, go to the [Console](https://console.developers.google.com/apis) and select *API & Services* > *Enable APIs and Services*. Search for *Admin SDK* and *Enable* the API. + +You have to perform this [tutorial](https://developers.google.com/admin-sdk/directory/v1/guides/delegation) to create a service account that you use to sync your users.
Save the `JSON file` you create during the process and rename it to `credentials.json`. + +> You can also use the `--google-credentials` parameter to explicitly specify the file with the service credentials. Please keep this file safe, or store it in AWS Secrets Manager. + +In the domain-wide delegation for the Admin API, you have to specify the following scopes for the user. + +* https://www.googleapis.com/auth/admin.directory.group.readonly +* https://www.googleapis.com/auth/admin.directory.group.member.readonly +* https://www.googleapis.com/auth/admin.directory.user.readonly + +Back in the Console, go to the Dashboard for the API & Services and select "Enable APIs and Services". +In the search box, type `Admin` and select the `Admin SDK` option. Click the `Enable` button. + +You will have to specify the email address of an admin via `--google-admin` to assume this user's role in the Directory. + +### AWS + +Go to the AWS Single Sign-On console in the region where you have set up AWS SSO and select +Settings. Click `Enable automatic provisioning`. + +A pop-up will appear with the URL and the Access Token. The Access Token will only appear +at this stage, so copy both of them; they are passed as parameters to the `ssosync` command. + +Or you can specify these as environment variables. + +```bash +SSOSYNC_SCIM_ACCESS_TOKEN= +SSOSYNC_SCIM_ENDPOINT= +``` + +Additionally, authenticate your AWS credentials. Follow this [section](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#:~:text=Creating%20the%20Credentials%20File) to create a Shared Credentials File in the home directory or export your Credentials with Environment Variables. Ensure that the default credentials are for the AWS account you intend to sync. + +To obtain your `Identity store ID`, go to the AWS Identity Center console and select Settings. Under the `Identity Source` section, copy the `Identity store ID`.
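+For example, here is a minimal sketch of exporting these values (the token and endpoint below are placeholders for the values copied from the provisioning pop-up; the `aws sso-admin list-instances` call is simply one way to look up the Identity store ID from the CLI):
+
+```bash
+# Placeholders -- substitute the values copied from the AWS SSO console
+export SSOSYNC_SCIM_ACCESS_TOKEN="<access token from the provisioning pop-up>"
+export SSOSYNC_SCIM_ENDPOINT="<SCIM endpoint URL from the provisioning pop-up>"
+
+# Look up the Identity store ID for the current account and region
+aws sso-admin list-instances --query 'Instances[0].IdentityStoreId' --output text
+```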
+ +## Local Usage + +```bash +git clone https://github.com/awslabs/ssosync.git +cd ssosync/ +make go-build +``` + +```bash +./ssosync --help +``` + +```bash +A command line tool to enable you to synchronise your Google +Apps (Google Workspace) users to AWS Single Sign-on (AWS SSO) +Complete documentation is available at https://github.com/awslabs/ssosync + +Usage: + ssosync [flags] + +Flags: + -t, --access-token string AWS SSO SCIM API Access Token + -d, --debug enable verbose / debug logging + -e, --endpoint string AWS SSO SCIM API Endpoint + -u, --google-admin string Google Workspace admin user email + -c, --google-credentials string path to Google Workspace credentials file (default "credentials.json") + -g, --group-match string Google Workspace Groups filter query parameter, example: 'name:Admin* email:aws-*', see: https://developers.google.com/admin-sdk/directory/v1/guides/search-groups + -h, --help help for ssosync + --ignore-groups strings ignores these Google Workspace groups + --ignore-users strings ignores these Google Workspace users + --include-groups strings include only these Google Workspace groups, NOTE: only works when --sync-method 'users_groups' + --log-format string log format (default "text") + --log-level string log level (default "info") + -s, --sync-method string Sync method to use (users_groups|groups) (default "groups") + -m, --user-match string Google Workspace Users filter query parameter, example: 'name:John* email:admin*', see: https://developers.google.com/admin-sdk/directory/v1/guides/search-users + -v, --version version for ssosync + -r, --region AWS region where identity store exists + -i, --identity-store-id AWS Identity Store ID +``` + +The function has two behaviors, controlled by the `--sync-method` flag: + +1. `groups`: __(default)__ The sync procedure works based on groups: it gets the Google Workspace groups and their members, creates the users (members of the Google Workspace groups) in AWS SSO, then creates the groups, and finally assigns the users to their respective groups. +2. `users_groups`: __(original behavior, previous versions)__ The sync procedure is simple: it gets the Google Workspace users and creates them in AWS SSO, then gets the Google Workspace groups, creates them in AWS SSO, and assigns users to the corresponding AWS SSO groups. + +Flags Notes: + +* `--include-groups` only works when `--sync-method` is `users_groups` +* `--ignore-users` works for both `--sync-method` values. Example: `--ignore-users user1@example.com,user2@example.com` or `SSOSYNC_IGNORE_USERS=user1@example.com,user2@example.com` +* `--ignore-groups` works for both `--sync-method` values. Example: `--ignore-groups group1@example.com,group2@example.com` or `SSOSYNC_IGNORE_GROUPS=group1@example.com,group2@example.com` +* `--group-match` works for both `--sync-method` values and also in combination with `--ignore-groups` and `--ignore-users`. This is the filter query passed to the [Google Workspace Directory API when searching Groups](https://developers.google.com/admin-sdk/directory/v1/guides/search-groups). If the flag is not used, groups are not filtered. +* `--user-match` works for both `--sync-method` values and also in combination with `--ignore-groups` and `--ignore-users`. This is the filter query passed to the [Google Workspace Directory API when searching Users](https://developers.google.com/admin-sdk/directory/v1/guides/search-users). If the flag is not used, users are not filtered. + +NOTES: + +1.
Depending on the number of users and groups you have, you may get AWS SSO SCIM API rate limit errors; this happens more frequently if you execute the sync many times in a short time. +2. Depending on the number of users and groups you have, the `--debug` flag can generate a large number of log lines in your AWS Lambda function. Test locally with the `--debug` flag enabled and disable it when you run it as an AWS Lambda function. + +## AWS Lambda Usage + +NOTE: Using Lambda may incur costs in your AWS account. Please make sure you have checked +the pricing for AWS Lambda and CloudWatch before continuing. + +Running ssosync once means that any subsequent changes to your Google directory will not appear in +AWS SSO. To sync regularly, you can run ssosync via AWS Lambda. + +:warning: You can find it in the [AWS Serverless Application Repository](https://eu-west-1.console.aws.amazon.com/lambda/home#/create/app?applicationId=arn:aws:serverlessrepo:us-east-2:004480582608:applications/SSOSync). + +## SAM + +You can use the AWS Serverless Application Model (SAM) to deploy this to your account. + +> Please install the [AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) and [GoReleaser](https://goreleaser.com/install/). + +Specify an Amazon S3 Bucket for the upload with `export S3_BUCKET=` and an S3 prefix with `export S3_PREFIX=`. + +Execute `make package` in the console, which will package and upload the function to the bucket. You can then use the `packaged.yaml` to configure and deploy the stack in the [AWS CloudFormation Console](https://console.aws.amazon.com/cloudformation). + +### Example + +Build + +```bash +aws cloudformation validate-template --template-body file://template.yaml 1>/dev/null && +sam validate && +sam build +``` + +Deploy + +```bash +sam deploy --guided +``` + +## License + +[Apache-2.0](/LICENSE) diff --git a/modules/aws-ssosync/docs/img/admin_sdk.png b/modules/aws-ssosync/docs/img/admin_sdk.png new file mode 100644 index 000000000..6ece1a68d Binary files /dev/null and b/modules/aws-ssosync/docs/img/admin_sdk.png differ diff --git a/modules/aws-ssosync/docs/img/create_service_account.png b/modules/aws-ssosync/docs/img/create_service_account.png new file mode 100644 index 000000000..1653f47f9 Binary files /dev/null and b/modules/aws-ssosync/docs/img/create_service_account.png differ diff --git a/modules/aws-ssosync/docs/img/dl_service_account_creds.png b/modules/aws-ssosync/docs/img/dl_service_account_creds.png new file mode 100644 index 000000000..e9552cd79 Binary files /dev/null and b/modules/aws-ssosync/docs/img/dl_service_account_creds.png differ diff --git a/modules/aws-ssosync/iam.tf b/modules/aws-ssosync/iam.tf new file mode 100644 index 000000000..d5ecaf3f0 --- /dev/null +++ b/modules/aws-ssosync/iam.tf @@ -0,0 +1,47 @@ + +data "aws_iam_policy_document" "ssosync_lambda_assume_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["lambda.amazonaws.com"] + } + } +} + +data "aws_iam_policy_document" "ssosync_lambda_identity_center" { + statement { + effect = "Allow" + actions = [ + "identitystore:DeleteUser", + "identitystore:CreateGroup", + "identitystore:CreateGroupMembership", + "identitystore:ListGroups", + "identitystore:ListUsers", + "identitystore:ListGroupMemberships", + "identitystore:IsMemberInGroups", + "identitystore:GetGroupMembershipId", + "identitystore:DeleteGroupMembership", + "identitystore:DeleteGroup", + "secretsmanager:GetSecretValue", +
"kms:Decrypt", + "logs:PutLogEvents", + "logs:CreateLogStream", + "logs:CreateLogGroup" + ] + resources = ["*"] + } +} + +resource "aws_iam_role" "default" { + count = local.enabled ? 1 : 0 + + name = module.this.id + assume_role_policy = data.aws_iam_policy_document.ssosync_lambda_assume_role.json + + inline_policy { + name = "ssosync_lambda_identity_center" + policy = data.aws_iam_policy_document.ssosync_lambda_identity_center.json + } +} diff --git a/modules/aws-ssosync/main.tf b/modules/aws-ssosync/main.tf new file mode 100644 index 000000000..79b24efdb --- /dev/null +++ b/modules/aws-ssosync/main.tf @@ -0,0 +1,128 @@ +locals { + enabled = module.this.enabled + google_credentials = one(data.aws_ssm_parameter.google_credentials[*].value) + scim_endpoint_url = one(data.aws_ssm_parameter.scim_endpoint_url[*].value) + scim_endpoint_access_token = one(data.aws_ssm_parameter.scim_endpoint_access_token[*].value) + identity_store_id = one(data.aws_ssm_parameter.identity_store_id[*].value) + + ssosync_artifact_url = "${var.ssosync_url_prefix}/${var.ssosync_version}/ssosync_Linux_${var.architecture}.tar.gz" + + download_artifact = "ssosync.tar.gz" +} + +data "aws_ssm_parameter" "google_credentials" { + count = local.enabled ? 1 : 0 + name = "${var.google_credentials_ssm_path}/google_credentials" +} + +data "aws_ssm_parameter" "scim_endpoint_url" { + count = local.enabled ? 1 : 0 + name = "${var.google_credentials_ssm_path}/scim_endpoint_url" +} + +data "aws_ssm_parameter" "scim_endpoint_access_token" { + count = local.enabled ? 1 : 0 + name = "${var.google_credentials_ssm_path}/scim_endpoint_access_token" +} + +data "aws_ssm_parameter" "identity_store_id" { + count = local.enabled ? 1 : 0 + name = "${var.google_credentials_ssm_path}/identity_store_id" +} + + +module "ssosync_artifact" { + count = local.enabled ? 1 : 0 + + source = "cloudposse/module-artifact/external" + version = "0.8.0" + + filename = local.download_artifact + module_name = "ssosync" + module_path = path.module + url = local.ssosync_artifact_url +} + +resource "null_resource" "extract_my_tgz" { + count = local.enabled ? 1 : 0 + + provisioner "local-exec" { + command = "tar -xzf ${local.download_artifact} -C dist" + } + + depends_on = [module.ssosync_artifact] +} + +data "archive_file" "lambda" { + count = local.enabled ? 1 : 0 + + type = "zip" + source_file = "dist/ssosync" + output_path = "ssosync.zip" + + depends_on = [null_resource.extract_my_tgz] +} + + +resource "aws_lambda_function" "ssosync" { + count = local.enabled ? 
1 : 0 + + function_name = module.this.id + filename = "ssosync.zip" + source_code_hash = module.ssosync_artifact[0].base64sha256 + description = "Syncs Google Workspace users and groups to AWS SSO" + role = aws_iam_role.default[0].arn + handler = "ssosync" + runtime = "go1.x" + timeout = 300 + memory_size = 128 + + environment { + variables = { + SSOSYNC_LOG_LEVEL = var.log_level + SSOSYNC_LOG_FORMAT = var.log_format + SSOSYNC_GOOGLE_CREDENTIALS = local.google_credentials + SSOSYNC_GOOGLE_ADMIN = var.google_admin_email + SSOSYNC_SCIM_ENDPOINT = local.scim_endpoint_url + SSOSYNC_SCIM_ACCESS_TOKEN = local.scim_endpoint_access_token + SSOSYNC_REGION = var.region + SSOSYNC_IDENTITY_STORE_ID = local.identity_store_id + SSOSYNC_USER_MATCH = var.google_user_match + SSOSYNC_GROUP_MATCH = var.google_group_match + SSOSYNC_SYNC_METHOD = var.sync_method + SSOSYNC_IGNORE_GROUPS = var.ignore_groups + SSOSYNC_IGNORE_USERS = var.ignore_users + SSOSYNC_INCLUDE_GROUPS = var.include_groups + SSOSYNC_LOAD_ASM_SECRETS = false + } + } + depends_on = [null_resource.extract_my_tgz, data.archive_file.lambda] +} + +resource "aws_cloudwatch_event_rule" "ssosync" { + count = var.enabled ? 1 : 0 + + name = module.this.id + description = "Run ssosync on a schedule" + schedule_expression = var.schedule_expression + +} + +resource "aws_cloudwatch_event_target" "ssosync" { + count = var.enabled ? 1 : 0 + + rule = aws_cloudwatch_event_rule.ssosync[0].name + target_id = module.this.id + arn = aws_lambda_function.ssosync[0].arn +} + + +resource "aws_lambda_permission" "allow_cloudwatch_execution" { + count = local.enabled ? 1 : 0 + + statement_id = "AllowExecutionFromCloudWatch" + action = "lambda:InvokeFunction" + function_name = aws_lambda_function.ssosync[0].arn + principal = "events.amazonaws.com" + source_arn = aws_cloudwatch_event_rule.ssosync[0].arn +} diff --git a/modules/aws-ssosync/outputs.tf b/modules/aws-ssosync/outputs.tf new file mode 100644 index 000000000..5f4ba5421 --- /dev/null +++ b/modules/aws-ssosync/outputs.tf @@ -0,0 +1,14 @@ +output "arn" { + description = "ARN of the lambda function" + value = one(aws_lambda_function.ssosync[*].arn) +} + +output "invoke_arn" { + description = "Invoke ARN of the lambda function" + value = one(aws_lambda_function.ssosync[*].invoke_arn) +} + +output "qualified_arn" { + description = "ARN identifying your Lambda Function Version (if versioning is enabled via publish = true)" + value = one(aws_lambda_function.ssosync[*].qualified_arn) +} diff --git a/modules/aws-ssosync/providers.tf b/modules/aws-ssosync/providers.tf new file mode 100644 index 000000000..dc58d9a25 --- /dev/null +++ b/modules/aws-ssosync/providers.tf @@ -0,0 +1,3 @@ +provider "aws" { + region = var.region +} diff --git a/modules/aws-ssosync/variables.tf b/modules/aws-ssosync/variables.tf new file mode 100644 index 000000000..83ba9c8da --- /dev/null +++ b/modules/aws-ssosync/variables.tf @@ -0,0 +1,102 @@ +variable "region" { + type = string + description = "AWS Region where AWS SSO is enabled" +} + +variable "schedule_expression" { + type = string + description = "Schedule for trigger the execution of ssosync (see CloudWatch schedule expressions)" + default = "rate(15 minutes)" +} + +variable "log_level" { + type = string + description = "Log level for Lambda function logging" + default = "warn" + + validation { + condition = contains(["panic", "fatal", "error", "warn", "info", "debug", "trace"], var.log_level) + error_message = "Allowed values: `panic`, `fatal`, `error`, `warn`, `info`, `debug`, `trace`" 
+ } +} + +variable "log_format" { + type = string + description = "Log format for Lambda function logging" + default = "json" + + validation { + condition = contains(["json", "text"], var.log_format) + error_message = "Allowed values: `json`, `text`" + } +} + +variable "ssosync_url_prefix" { + type = string + description = "URL prefix for ssosync binary" + default = "https://github.com/Benbentwo/ssosync/releases/download" +} + +variable "ssosync_version" { + type = string + description = "Version of ssosync to use" + default = "v2.0.2" +} + +variable "architecture" { + type = string + description = "Architecture of the Lambda function" + default = "x86_64" +} + +variable "google_credentials_ssm_path" { + type = string + description = "SSM Path for `ssosync` secrets" + default = "/ssosync" +} + +variable "google_admin_email" { + type = string + description = "Google Admin email" +} + +variable "google_user_match" { + type = string + description = "Google Workspace user filter query parameter, example: 'name:John* email:admin*', see: https://developers.google.com/admin-sdk/directory/v1/guides/search-users" + default = "" +} + +variable "google_group_match" { + type = string + description = "Google Workspace group filter query parameter, example: 'name:Admin* email:aws-*', see: https://developers.google.com/admin-sdk/directory/v1/guides/search-groups" + default = "" +} + +variable "ignore_groups" { + type = string + description = "Ignore these Google Workspace groups" + default = "" +} + +variable "ignore_users" { + type = string + description = "Ignore these Google Workspace users" + default = "" +} + +variable "include_groups" { + type = string + description = "Include only these Google Workspace groups. (Only applicable for sync_method user_groups)" + default = "" +} + +variable "sync_method" { + type = string + description = "Sync method to use" + default = "groups" + + validation { + condition = contains(["groups", "users_groups"], var.sync_method) + error_message = "Allowed values: `groups`, `users_groups`" + } +} diff --git a/modules/aws-ssosync/versions.tf b/modules/aws-ssosync/versions.tf new file mode 100644 index 000000000..990265a57 --- /dev/null +++ b/modules/aws-ssosync/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + archive = { + source = "hashicorp/archive" + version = ">= 2.3.0" + } + null = { + source = "hashicorp/null" + version = ">= 3.0" + } + } +} diff --git a/modules/aws-team-roles/README.md b/modules/aws-team-roles/README.md index d56ee37ab..61a2b5c0b 100644 --- a/modules/aws-team-roles/README.md +++ b/modules/aws-team-roles/README.md @@ -1,23 +1,86 @@ +--- +tags: + - component/aws-team-roles + - layer/identity + - provider/aws + - privileged +--- + # Component: `aws-team-roles` -This component is responsible for provisioning user and system IAM roles outside the `identity` account. -It sets them up to be assumed from the "team" roles defined in the `identity` account by -[the `aws-teams` component](../aws-teams) and/or the AWS SSO permission sets -defined in [the `aws-sso` component](../aws-sso). +This component is responsible for provisioning user and system IAM roles outside the `identity` account. 
It sets them up +to be assumed from the "team" roles defined in the `identity` account by [the `aws-teams` component](../aws-teams) +and/or the AWS SSO permission sets defined in [the `aws-sso` component](../aws-sso), and/or be directly accessible via +SAML logins. + +### Privileges are Granted to Users via IAM Policies + +Each role is granted permissions by attaching a list of IAM policies to the IAM role via its `role_policy_arns` list. +You can configure AWS managed policies by entering the ARNs of the policies directly into the list, or you can create a +custom policy as follows: + +1. Give the policy a name, e.g. `eks-admin`. We will use `NAME` as a placeholder for the name in the instructions below. +2. Create a file in the `aws-teams` directory with the name `policy-NAME.tf`. +3. In that file, create a policy as follows: + + ```hcl + data "aws_iam_policy_document" "NAME" { + # Define the policy here + } + + resource "aws_iam_policy" "NAME" { + name = format("%s-NAME", module.this.id) + policy = data.aws_iam_policy_document.NAME.json + + tags = module.this.tags + } + ``` + +4. Create a file named `additional-policy-map_override.tf` in the `aws-team-roles` directory (if it does not already + exist). This is a [terraform override file](https://developer.hashicorp.com/terraform/language/files/override), + meaning its contents will be merged with the main terraform file, and any locals defined in it will override locals + defined in other files. Having your code in this separate override file makes it possible for the component to + provide a placeholder local variable so that it works without customization, while allowing you to customize the + component and still update it without losing your customizations. +5. In that file, redefine the local variable `overridable_additional_custom_policy_map` map as follows: + + ```hcl + locals { + overridable_additional_custom_policy_map = { + NAME = aws_iam_policy.NAME.arn + } + } + ``` + + If you have multiple custom policies, add each one to the map in the form `NAME = aws_iam_policy.NAME.arn`. + +6. With that done, you can now attach that policy by adding the name to the `role_policy_arns` list. For example: + + ```yaml + role_policy_arns: + - "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess" + - "NAME" + ``` ## Usage **Stack Level**: Global -**Deployment**: Must be deployed by SuperAdmin using `atmos` CLI -Here's an example snippet for how to use this component. This specific usage is an example only, and not intended for production use. -You set the defaults in one YAML file, and import that file into each account's Global stack (except for the `identity` account itself). -If desired, you can make account-specific changes by overriding settings, for example -- Disable entire roles in the account by setting `enabled: false` +**Deployment**: Must be deployed by _SuperAdmin_ using `atmos` CLI + +Here's an example snippet for how to use this component. This specific usage is an example only, and not intended for +production use. You set the defaults in one YAML file, and import that file into each account's Global stack (except for +the `identity` account itself). 
If desired, you can make account-specific changes by overriding settings, for example + +- Disable entire roles in the account by setting `enabled: false` - Limit who can access the role by setting a different value for `trusted_teams` -- Change the permissions available to that role by overriding the `role_policy_arns` (not recommended, limit access to the role or create a different role with the desired set of permissions instead). +- Change the permissions available to that role by overriding the `role_policy_arns` (not recommended, limit access to + the role or create a different role with the desired set of permissions instead). -Note that when overriding, **maps are deep merged, but lists are replaced**. This means, for example, that your setting of `trusted_primary_roles` in an override completely replaces the default, it does not add to it, so if you want to allow an extra "primary" role to have access to the role, you have to include all the default "primary" roles in the list, too, or they will lose access. +Note that when overriding, **maps are deep merged, but lists are replaced**. This means, for example, that your setting +of `trusted_primary_roles` in an override completely replaces the default, it does not add to it, so if you want to +allow an extra "primary" role to have access to the role, you have to include all the default "primary" roles in the +list, too, or they will lose access. ```yaml components: @@ -33,8 +96,7 @@ components: # `template` serves as the default configuration for other roles via the YAML anchor. # However, `atmos` does not support "import" of YAML anchors, so if you define a new role # in another file, you will not be able to reference this anchor. - template: &user-template - # If `enabled: false`, the role will not be created in this account + template: &user-template # If `enabled: false`, the role will not be created in this account enabled: false # `max_session_duration` set the maximum session duration (in seconds) for the IAM roles. @@ -86,7 +148,7 @@ components: <<: *user-template enabled: true role_policy_arns: - - "arn:aws:iam::aws:policy/AdministratorAccess" + - "arn:aws:iam::aws:policy/AdministratorAccess" role_description: "Full administration of this account" trusted_teams: ["admin"] @@ -99,12 +161,12 @@ components: # administrative permissions and use a more restrictive role # for Terraform, such as PowerUser (further restricted to deny AWS SSO changes). 
role_policy_arns: - - "arn:aws:iam::aws:policy/AdministratorAccess" + - "arn:aws:iam::aws:policy/AdministratorAccess" role_description: "Role for Terraform administration of this account" trusted_teams: ["admin", "spacelift"] - ``` + ## Requirements @@ -112,20 +174,21 @@ components: |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | | [aws](#requirement\_aws) | >= 4.9.0 | +| [local](#requirement\_local) | >= 1.3 | ## Providers | Name | Version | |------|---------| | [aws](#provider\_aws) | >= 4.9.0 | -| [local](#provider\_local) | n/a | +| [local](#provider\_local) | >= 1.3 | ## Modules | Name | Source | Version | |------|--------|---------| | [assume\_role](#module\_assume\_role) | ../account-map/modules/team-assume-role-policy | n/a | -| [aws\_saml](#module\_aws\_saml) | cloudposse/stack-config/yaml//modules/remote-state | 1.0.0 | +| [aws\_saml](#module\_aws\_saml) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -133,19 +196,19 @@ components: | Name | Type | |------|------| -| [aws_iam_policy.billing_admin](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | -| [aws_iam_policy.billing_read_only](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | -| [aws_iam_policy.support](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_policy.eks_viewer](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_policy.kms_planner](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_policy.vpn_planner](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_iam_role.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | | [aws_iam_role_policy_attachment.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [local_file.account_info](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [aws_iam_policy.aws_billing_admin_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy) | data source | -| [aws_iam_policy.aws_billing_read_only_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy) | data source | -| [aws_iam_policy.aws_support_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy) | data source | | [aws_iam_policy_document.assume_role_aggregated](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | -| [aws_iam_policy_document.billing_admin_access_aggregated](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | -| [aws_iam_policy_document.support_access_aggregated](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | -| [aws_iam_policy_document.support_access_trusted_advisor](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| 
[aws_iam_policy_document.eks_view_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.eks_viewer_access_aggregated](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.kms_planner_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.kms_planner_access_aggregated](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.vpn_planner_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.vpn_planner_access_aggregated](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | ## Inputs @@ -180,6 +243,9 @@ components: |------|-------------| | [role\_name\_role\_arn\_map](#output\_role\_name\_role\_arn\_map) | Map of role names to role ARNs | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components) - Cloud Posse's upstream components + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components) - Cloud Posse's upstream + components diff --git a/modules/aws-team-roles/additional-policy-map.tf b/modules/aws-team-roles/additional-policy-map.tf new file mode 100644 index 000000000..03735ef4b --- /dev/null +++ b/modules/aws-team-roles/additional-policy-map.tf @@ -0,0 +1,10 @@ +locals { + # If you have custom policies, override this declaration by creating + # a file called `additional-policy-map_override.tf`. + # Then add the custom policies to the overridable_additional_custom_policy_map in that file. + # See the README for more details. + overridable_additional_custom_policy_map = { + # Example: + # eks_viewer = aws_iam_policy.eks_viewer.arn + } +} diff --git a/modules/aws-team-roles/main.tf b/modules/aws-team-roles/main.tf index b812de247..ff68d4191 100644 --- a/modules/aws-team-roles/main.tf +++ b/modules/aws-team-roles/main.tf @@ -10,11 +10,12 @@ locals { # If you want to create custom policies to add to multiple roles by name, create the policy # using an aws_iam_policy resource and then map it to the name you want to use in the # YAML configuration by adding an entry in `custom_policy_map`. 
- custom_policy_map = { - billing_read_only = try(aws_iam_policy.billing_read_only[0].arn, null) - billing_admin = try(aws_iam_policy.billing_admin[0].arn, null) - support = try(aws_iam_policy.support[0].arn, null) + supplied_custom_policy_map = { + eks_viewer = try(aws_iam_policy.eks_viewer[0].arn, null) + vpn_planner = try(aws_iam_policy.vpn_planner[0].arn, null) + kms_planner = try(aws_iam_policy.kms_planner[0].arn, null) } + custom_policy_map = merge(local.supplied_custom_policy_map, local.overridable_additional_custom_policy_map) configured_policies = flatten([for k, v in local.roles_config : v.role_policy_arns]) diff --git a/modules/aws-team-roles/policy-billing.tf b/modules/aws-team-roles/policy-billing.tf deleted file mode 100644 index 3363c84b3..000000000 --- a/modules/aws-team-roles/policy-billing.tf +++ /dev/null @@ -1,45 +0,0 @@ -locals { - billing_read_only_policy_enabled = contains(local.configured_policies, "billing_read_only") - billing_admin_policy_enabled = contains(local.configured_policies, "billing_admin") -} - -# Billing Read-Only Policies / Roles -data "aws_iam_policy" "aws_billing_read_only_access" { - count = local.billing_read_only_policy_enabled ? 1 : 0 - - arn = "arn:${local.aws_partition}:iam::aws:policy/AWSBillingReadOnlyAccess" -} - -resource "aws_iam_policy" "billing_read_only" { - count = local.billing_read_only_policy_enabled ? 1 : 0 - - name = format("%s-billing", module.this.id) - policy = data.aws_iam_policy.aws_billing_read_only_access[0].policy - - tags = module.this.tags -} - -# Billing Admin Policies / Roles -data "aws_iam_policy" "aws_billing_admin_access" { - count = local.billing_admin_policy_enabled ? 1 : 0 - - arn = "arn:${local.aws_partition}:iam::aws:policy/job-function/Billing" -} - -data "aws_iam_policy_document" "billing_admin_access_aggregated" { - count = local.billing_admin_policy_enabled ? 1 : 0 - - source_policy_documents = [ - data.aws_iam_policy.aws_billing_admin_access[0].policy, - data.aws_iam_policy.aws_support_access[0].policy, # Include support access for the billing role, defined in `support-policy.tf` - ] -} - -resource "aws_iam_policy" "billing_admin" { - count = local.billing_admin_policy_enabled ? 1 : 0 - - name = format("%s-billing-admin", module.this.id) - policy = data.aws_iam_policy_document.billing_admin_access_aggregated[0].json - - tags = module.this.tags -} diff --git a/modules/aws-team-roles/policy-eks-viewer.tf b/modules/aws-team-roles/policy-eks-viewer.tf new file mode 100644 index 000000000..9bbeceb80 --- /dev/null +++ b/modules/aws-team-roles/policy-eks-viewer.tf @@ -0,0 +1,39 @@ +locals { + eks_viewer_enabled = contains(local.configured_policies, "eks_viewer") +} + +data "aws_iam_policy_document" "eks_view_access" { + count = local.eks_viewer_enabled ? 1 : 0 + + statement { + sid = "AllowEKSView" + effect = "Allow" + actions = [ + "eks:Get*", + "eks:Describe*", + "eks:List*", + "eks:Access*" + ] + resources = [ + "*" + ] + } + +} + +data "aws_iam_policy_document" "eks_viewer_access_aggregated" { + count = local.eks_viewer_enabled ? 1 : 0 + + source_policy_documents = [ + data.aws_iam_policy_document.eks_view_access[0].json, + ] +} + +resource "aws_iam_policy" "eks_viewer" { + count = local.eks_viewer_enabled ? 
1 : 0 + + name = format("%s-eks_viewer", module.this.id) + policy = data.aws_iam_policy_document.eks_viewer_access_aggregated[0].json + + tags = module.this.tags +} diff --git a/modules/aws-team-roles/policy-kms-planner.tf b/modules/aws-team-roles/policy-kms-planner.tf new file mode 100644 index 000000000..45080b183 --- /dev/null +++ b/modules/aws-team-roles/policy-kms-planner.tf @@ -0,0 +1,48 @@ +locals { + kms_planner_enabled = contains(local.configured_policies, "kms_planner") +} + +data "aws_iam_policy_document" "kms_planner_access" { + count = local.kms_planner_enabled ? 1 : 0 + + statement { + sid = "AllowKMSDecrypt" + effect = "Allow" + + actions = [ + "kms:Decrypt", + ] + + # Only allow decryption of SSM parameters. + # To further restrict to specific parameters, add conditions on the value of + # kms:EncryptionContext:PARAMETER_ARN + # See https://docs.aws.amazon.com/kms/latest/developerguide/services-parameter-store.html#parameter-store-encryption-context + condition { + test = "Null" + variable = "kms:EncryptionContext:PARAMETER_ARN" + values = ["false"] + } + + resources = [ + "*" + ] + } + +} + +data "aws_iam_policy_document" "kms_planner_access_aggregated" { + count = local.kms_planner_enabled ? 1 : 0 + + source_policy_documents = [ + data.aws_iam_policy_document.kms_planner_access[0].json, + ] +} + +resource "aws_iam_policy" "kms_planner" { + count = local.kms_planner_enabled ? 1 : 0 + + name = format("%s-kms_planner", module.this.id) + policy = data.aws_iam_policy_document.kms_planner_access_aggregated[0].json + + tags = module.this.tags +} diff --git a/modules/aws-team-roles/policy-support.tf b/modules/aws-team-roles/policy-support.tf deleted file mode 100644 index ef75409b2..000000000 --- a/modules/aws-team-roles/policy-support.tf +++ /dev/null @@ -1,54 +0,0 @@ -# This Terraform configuration file which creates a customer-managed policy exists in both aws-teams and aws-team-roles. -# -# The reason for this is as follows: -# -# The support role (unlike most roles in the identity account) needs specific access to -# resources in the identity account. Policies must be created per-account, so the identity -# account needs a support policy, and that has to be created in aws-teams. -# -# Other custom roles are only needed in either the identity or the other accounts, not both. -# - -locals { - support_policy_enabled = contains(local.configured_policies, "support") -} - -data "aws_iam_policy_document" "support_access_trusted_advisor" { - count = local.support_policy_enabled ? 1 : 0 - - statement { - sid = "AllowTrustedAdvisor" - effect = "Allow" - actions = [ - "trustedadvisor:Describe*", - ] - - resources = [ - "*", - ] - } -} - -data "aws_iam_policy" "aws_support_access" { - count = local.support_policy_enabled ? 1 : 0 - - arn = "arn:${local.aws_partition}:iam::aws:policy/AWSSupportAccess" -} - -data "aws_iam_policy_document" "support_access_aggregated" { - count = local.support_policy_enabled ? 1 : 0 - - source_policy_documents = [ - data.aws_iam_policy.aws_support_access[0].policy, - data.aws_iam_policy_document.support_access_trusted_advisor[0].json - ] -} - -resource "aws_iam_policy" "support" { - count = local.support_policy_enabled ? 
1 : 0 - - name = format("%s-support", module.this.id) - policy = data.aws_iam_policy_document.support_access_aggregated[0].json - - tags = module.this.tags -} diff --git a/modules/aws-team-roles/policy-vpn-planner.tf b/modules/aws-team-roles/policy-vpn-planner.tf new file mode 100644 index 000000000..09a4c8c11 --- /dev/null +++ b/modules/aws-team-roles/policy-vpn-planner.tf @@ -0,0 +1,36 @@ +locals { + vpn_planner_enabled = contains(local.configured_policies, "vpn_planner") +} + +data "aws_iam_policy_document" "vpn_planner_access" { + count = local.vpn_planner_enabled ? 1 : 0 + + statement { + sid = "AllowVPNReader" + effect = "Allow" + actions = [ + "ec2:ExportClientVpnClientConfiguration", + ] + resources = [ + "*" + ] + } + +} + +data "aws_iam_policy_document" "vpn_planner_access_aggregated" { + count = local.vpn_planner_enabled ? 1 : 0 + + source_policy_documents = [ + data.aws_iam_policy_document.vpn_planner_access[0].json, + ] +} + +resource "aws_iam_policy" "vpn_planner" { + count = local.vpn_planner_enabled ? 1 : 0 + + name = format("%s-vpn_planner", module.this.id) + policy = data.aws_iam_policy_document.vpn_planner_access_aggregated[0].json + + tags = module.this.tags +} diff --git a/modules/aws-team-roles/remote-state.tf b/modules/aws-team-roles/remote-state.tf index 7f95d1ba6..87a5d8af2 100644 --- a/modules/aws-team-roles/remote-state.tf +++ b/modules/aws-team-roles/remote-state.tf @@ -1,6 +1,6 @@ module "aws_saml" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.0.0" + version = "1.5.0" component = "aws-saml" privileged = true diff --git a/modules/aws-team-roles/versions.tf b/modules/aws-team-roles/versions.tf index cc73ffd35..2fdade250 100644 --- a/modules/aws-team-roles/versions.tf +++ b/modules/aws-team-roles/versions.tf @@ -6,5 +6,9 @@ terraform { source = "hashicorp/aws" version = ">= 4.9.0" } + local = { + source = "hashicorp/local" + version = ">= 1.3" + } } } diff --git a/modules/aws-teams/README.md b/modules/aws-teams/README.md index 7c492d155..fb99606a9 100644 --- a/modules/aws-teams/README.md +++ b/modules/aws-teams/README.md @@ -1,53 +1,62 @@ +--- +tags: + - component/aws-teams + - layer/identity + - provider/aws + - privileged +--- + # Component: `aws-teams` -This component is responsible for provisioning all primary user and system roles into the centralized identity account. -This is expected to be use alongside [the `aws-team-roles` component](../aws-team-roles) to provide -fine grained role delegation across the account hierarchy. +This component is responsible for provisioning all primary user and system roles into the centralized identity account. +This is expected to be used alongside [the `aws-team-roles` component](../aws-team-roles) to provide fine-grained role +delegation across the account hierarchy. ### Teams Function Like Groups and are Implemented as Roles -The "teams" created in the `identity` account by this module can be thought of as access control "groups": -a user who is allowed access one of these teams gets access to a set of roles (and corresponding permissions) -across a set of accounts. Generally, there is nothing else provisioned in the `identity` account, -so the teams have limited access to resources in the `identity` account by design. -Teams are implemented as IAM Roles in each account. Access to the "teams" in the `identity` -account is controlled by the `aws-saml` and `aws-sso` components. 
Access to the roles in all the -other accounts is controlled by the "assume role" policies of those roles, which allow the "team" -or AWS SSO Permission set to assume the role (or not). +The "teams" created in the `identity` account by this module can be thought of as access control "groups": a user who is +allowed access one of these teams gets access to a set of roles (and corresponding permissions) across a set of +accounts. Generally, there is nothing else provisioned in the `identity` account, so the teams have limited access to +resources in the `identity` account by design. + +Teams are implemented as IAM Roles in each account. Access to the "teams" in the `identity` account is controlled by the +`aws-saml` and `aws-sso` components. Access to the roles in all the other accounts is controlled by the "assume role" +policies of those roles, which allow the "team" or AWS SSO Permission set to assume the role (or not). ### Privileges are Defined for Each Role in Each Account by `aws-team-roles` -Every account besides the `identity` account has a set of IAM roles created by the -`aws-team-roles` component. In that component, the account's roles are assigned privileges, -and those privileges ultimately determine what a user can do in that account. +Every account besides the `identity` account has a set of IAM roles created by the `aws-team-roles` component. In that +component, the account's roles are assigned privileges, and those privileges ultimately determine what a user can do in +that account. -Access to the roles can be granted in a number of ways. -One way is by listing "teams" created by this component as "trusted" (`trusted_teams`), -meaning that users who have access to the team role in the `identity` account are -allowed (trusted) to assume the role configured in the target account. -Another is by listing an AWS SSO Permission Set in the account (`trusted_permission_sets`). +Access to the roles can be granted in a number of ways. One way is by listing "teams" created by this component as +"trusted" (`trusted_teams`), meaning that users who have access to the team role in the `identity` account are allowed +(trusted) to assume the role configured in the target account. Another is by listing an AWS SSO Permission Set in the +account (`trusted_permission_sets`). ### Role Access is Enabled by SAML and/or AWS SSO configuration + Users can again access to a role in the `identity` account through either (or both) of 2 mechanisms: #### SAML Access -- SAML access is globally configured via the `aws-saml` component, enabling an external -SAML Identity Provider (IdP) to control access to roles in the `identity` account. -(SAML access can be separately configured for other accounts, see the `aws-saml` and `aws-team-roles` components for more on that.) + +- SAML access is globally configured via the `aws-saml` component, enabling an external SAML Identity Provider (IdP) to + control access to roles in the `identity` account. (SAML access can be separately configured for other accounts, see + the `aws-saml` and `aws-team-roles` components for more on that.) - Individual roles are enabled for SAML access by setting `aws_saml_login_enabled: true` in the role configuration. - Individual users are granted access to these roles by configuration in the SAML IdP. #### AWS SSO Access -The `aws-sso` component can create AWS Permission Sets that allow users to assume specific roles -in the `identity` account. See the `aws-sso` component for details. 
+ +The `aws-sso` component can create AWS Permission Sets that allow users to assume specific roles in the `identity` +account. See the `aws-sso` component for details. ## Usage -**Stack Level**: Global -**Deployment**: Must be deployed by SuperAdmin using `atmos` CLI +**Stack Level**: Global **Deployment**: Must be deployed by SuperAdmin using `atmos` CLI -Here's an example snippet for how to use this component. The component should only be applied once, -which is typically done via the identity stack (e.g. `gbl-identity.yaml`). +Here's an example snippet for how to use this component. The component should only be applied once, which is typically +done via the identity stack (e.g. `gbl-identity.yaml`). ```yaml components: @@ -87,47 +96,48 @@ components: # If a role is both trusted and denied, it will not be able to access this role. # Permission sets specify users operating from the given AWS SSO permission set in this account. - trusted_permission_sets: [ ] - denied_permission_sets: [ ] + trusted_permission_sets: [] + denied_permission_sets: [] # Primary roles specify the short role names of roles in the primary (identity) # account that are allowed to assume this role. - trusted_teams: [ ] - denied_teams: [ "viewer" ] + trusted_teams: [] + denied_teams: ["viewer"] # Role ARNs specify Role ARNs in any account that are allowed to assume this role. # BE CAREFUL: there is nothing limiting these Role ARNs to roles within our organization. - trusted_role_arns: [ ] - denied_role_arns: [ ] + trusted_role_arns: [] + denied_role_arns: [] admin: <<: *user-template - role_description: "Team with PowerUserAccess permissions in `identity` and AdministratorAccess to all other accounts except `root`" + role_description: + "Team with PowerUserAccess permissions in `identity` and AdministratorAccess to all other accounts except + `root`" # Limit `admin` to Power User to prevent accidentally destroying the admin role itself # Use SuperAdmin to administer IAM access - role_policy_arns: [ "arn:aws:iam::aws:policy/PowerUserAccess" ] + role_policy_arns: ["arn:aws:iam::aws:policy/PowerUserAccess"] # TODO Create a "security" team with AdministratorAccess to audit and security, remove "admin" write access to those accounts aws_saml_login_enabled: true # list of roles in primary that can assume into this role in delegated accounts # primary admin can assume delegated admin - trusted_teams: [ "admin" ] + trusted_teams: ["admin"] # GH runner should be moved to its own `ghrunner` role - trusted_permission_sets: [ "IdentityAdminTeamAccess" ] - + trusted_permission_sets: ["IdentityAdminTeamAccess"] spacelift: <<: *user-template role_description: Team for our privileged Spacelift server role_policy_arns: - - team_role_access + - team_role_access aws_saml_login_enabled: false trusted_teams: - - admin + - admin trusted_role_arns: ["arn:aws:iam::123456789012:role/eg-ue2-auto-spacelift-worker-pool-admin"] - ``` + ## Requirements @@ -135,21 +145,22 @@ components: |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | | [aws](#requirement\_aws) | >= 4.9.0 | +| [local](#requirement\_local) | >= 1.3 | ## Providers | Name | Version | |------|---------| | [aws](#provider\_aws) | >= 4.9.0 | -| [local](#provider\_local) | n/a | +| [local](#provider\_local) | >= 1.3 | ## Modules | Name | Source | Version | |------|--------|---------| -| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.0.0 | +| [account\_map](#module\_account\_map) | 
cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [assume\_role](#module\_assume\_role) | ../account-map/modules/team-assume-role-policy | n/a | -| [aws\_saml](#module\_aws\_saml) | cloudposse/stack-config/yaml//modules/remote-state | 1.0.0 | +| [aws\_saml](#module\_aws\_saml) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -157,15 +168,11 @@ components: | Name | Type | |------|------| -| [aws_iam_policy.support](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_iam_policy.team_role_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_iam_role.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | | [aws_iam_role_policy_attachment.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [local_file.account_info](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [aws_iam_policy.aws_support_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy) | data source | | [aws_iam_policy_document.assume_role_aggregated](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | -| [aws_iam_policy_document.support_access_aggregated](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | -| [aws_iam_policy_document.support_access_trusted_advisor](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_policy_document.team_role_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | ## Inputs @@ -195,7 +202,7 @@ components: | [region](#input\_region) | AWS Region | `string` | n/a | yes | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | -| [teams\_config](#input\_teams\_config) | A roles map to configure the accounts. |
map(object({
denied_teams = list(string)
denied_permission_sets = list(string)
denied_role_arns = list(string)
max_session_duration = number # in seconds 3600 <= max <= 43200 (12 hours)
role_description = string
role_policy_arns = list(string)
aws_saml_login_enabled = bool
trusted_teams = list(string)
trusted_permission_sets = list(string)
trusted_role_arns = list(string)
}))
| n/a | yes | +| [teams\_config](#input\_teams\_config) | A roles map to configure the accounts. |
map(object({
denied_teams = list(string)
denied_permission_sets = list(string)
denied_role_arns = list(string)
max_session_duration = number # in seconds 3600 <= max <= 43200 (12 hours)
role_description = string
role_policy_arns = list(string)
aws_saml_login_enabled = bool
allowed_roles = optional(map(list(string)), {})
trusted_teams = list(string)
trusted_permission_sets = list(string)
trusted_role_arns = list(string)
}))
| n/a | yes | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | | [trusted\_github\_repos](#input\_trusted\_github\_repos) | Map where keys are role names (same keys as `teams_config`) and values are lists of
GitHub repositories allowed to assume those roles. See `account-map/modules/github-assume-role-policy.mixin.tf`
for specifics about repository designations. | `map(list(string))` | `{}` | no |
@@ -208,7 +215,42 @@ components:
| [team\_names](#output\_team\_names) | List of team names |
| [teams\_config](#output\_teams\_config) | Map of team config with name, target arn, and description |
+
+
+## Known Problems
+
+### Error: `assume role policy: LimitExceeded: Cannot exceed quota for ACLSizePerRole: 2048`
+
+The `aws-teams` architecture, when enabling access to a role via many AWS SSO Profiles, can create large "assume
+role" policies, large enough to exceed the default quota of 2048 characters. If you run into this limitation, you will
+get an error like this:
+
+```
+Error: error updating IAM Role (acme-gbl-root-tfstate-backend-analytics-ro) assume role policy: LimitExceeded: Cannot exceed quota for ACLSizePerRole: 2048
+```
+
+This can happen in either or both of the `identity` and `root` accounts (for Terraform state access). So far, we have always
+been able to resolve this by requesting a quota increase, which is automatically granted a few minutes after making the
+request. To request the quota increase:
+
+- Log in to the AWS Web console as admin in the affected account
+
+- Set your region to N. Virginia `us-east-1`
+
+- Navigate to the Service Quotas page via the account dropdown menu
+- Click on AWS Services in the left sidebar
+
+- Search for "IAM" and select "AWS Identity and Access Management (IAM)". (If you don't find that option, make sure you
+  have selected the `us-east-1` region.)
+
+- Find and select "Role trust policy length"
+
+- Request an increase to 4096 characters
+
+- Wait for the request to be approved, usually less than a few minutes

 ## References

- * [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components)- Cloud Posse's upstream component
+
+- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components) - Cloud Posse's upstream
+  component
diff --git a/modules/aws-teams/additional-policy-map.tf b/modules/aws-teams/additional-policy-map.tf
new file mode 100644
index 000000000..13ffa4c3d
--- /dev/null
+++ b/modules/aws-teams/additional-policy-map.tf
@@ -0,0 +1,10 @@
+locals {
+  # If you have custom policies, override this declaration by creating
+  # a file called `additional-policy-map_override.tf`.
+  # Then add the custom policies to the overridable_additional_custom_policy_map in that file.
+  # See the README in `aws-team-roles` for more details.
+  overridable_additional_custom_policy_map = {
+    # Example:
+    # eks_viewer = aws_iam_policy.eks_viewer.arn
+  }
+}
diff --git a/modules/aws-teams/main.tf b/modules/aws-teams/main.tf
index adb3606d7..18b18bb44 100644
--- a/modules/aws-teams/main.tf
+++ b/modules/aws-teams/main.tf
@@ -6,10 +6,10 @@ locals {
   # If you want to create custom policies to add to multiple roles by name, create the policy
   # using an aws_iam_policy resource and then map it to the name you want to use in the
   # YAML configuration by adding an entry in `custom_policy_map`.
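  # For example (an illustrative sketch only; the `eks_viewer` name and policy are hypothetical):
  # if an override file defines `aws_iam_policy.eks_viewer`, you could expose it as
  #   eks_viewer = aws_iam_policy.eks_viewer.arn
  # in `overridable_additional_custom_policy_map` (see `additional-policy-map.tf` above), and then
  # reference "eks_viewer" in a team's `role_policy_arns` in the stack YAML, just as the usage
  # example references "team_role_access".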
- custom_policy_map = { + supplied_custom_policy_map = { team_role_access = aws_iam_policy.team_role_access.arn - support = try(aws_iam_policy.support[0].arn, null) } + custom_policy_map = merge(local.supplied_custom_policy_map, local.overridable_additional_custom_policy_map) configured_policies = flatten([for k, v in local.roles_config : v.role_policy_arns]) @@ -36,7 +36,7 @@ module "assume_role" { for_each = local.roles_config source = "../account-map/modules/team-assume-role-policy" - allowed_roles = { (local.identity_account_account_name) = each.value.trusted_teams } + allowed_roles = merge(each.value.allowed_roles, { (local.identity_account_account_name) = each.value.trusted_teams }) denied_roles = { (local.identity_account_account_name) = each.value.denied_teams } allowed_principal_arns = each.value.trusted_role_arns denied_principal_arns = each.value.denied_role_arns diff --git a/modules/aws-teams/outputs.tf b/modules/aws-teams/outputs.tf index 7cb23df80..697a29951 100644 --- a/modules/aws-teams/outputs.tf +++ b/modules/aws-teams/outputs.tf @@ -25,5 +25,5 @@ resource "local_file" "account_info" { role_name_role_arn_map = local.role_name_role_arn_map namespace = module.this.namespace }) - filename = "${path.module}/../aws-team-roles/iam-role-info/${module.this.id}.sh" + filename = "${path.module}/../aws-team-roles/iam-role-info/${module.this.id}-teams.sh" } diff --git a/modules/aws-teams/policy-support.tf b/modules/aws-teams/policy-support.tf deleted file mode 100644 index ebd22c783..000000000 --- a/modules/aws-teams/policy-support.tf +++ /dev/null @@ -1,56 +0,0 @@ -# This Terraform configuration file which creates a customer-managed policy exists in both -# aws-teams and aws-team-roles. -# - -# The reason for this is as follows: -# -# The support role (unlike most roles in the identity account) needs specific access to -# resources in the identity account. Policies must be created per-account, so the identity -# account needs a support policy, and that has to be created in aws-teams. -# -# Most other custom roles are only needed in either aws-teams and aws-team-roles, not both. -# - -locals { - support_policy_enabled = contains(local.configured_policies, "support") -} - -data "aws_iam_policy_document" "support_access_trusted_advisor" { - count = local.support_policy_enabled ? 1 : 0 - - statement { - sid = "AllowTrustedAdvisor" - effect = "Allow" - actions = [ - "trustedadvisor:Describe*", - ] - - resources = [ - "*", - ] - } -} - -data "aws_iam_policy" "aws_support_access" { - count = local.support_policy_enabled ? 1 : 0 - - arn = "arn:${local.aws_partition}:iam::aws:policy/AWSSupportAccess" -} - -data "aws_iam_policy_document" "support_access_aggregated" { - count = local.support_policy_enabled ? 1 : 0 - - source_policy_documents = [ - data.aws_iam_policy.aws_support_access[0].policy, - data.aws_iam_policy_document.support_access_trusted_advisor[0].json - ] -} - -resource "aws_iam_policy" "support" { - count = local.support_policy_enabled ? 
1 : 0 - - name = format("%s-support", module.this.id) - policy = data.aws_iam_policy_document.support_access_aggregated[0].json - - tags = module.this.tags -} diff --git a/modules/aws-teams/policy-team-role-access.tf b/modules/aws-teams/policy-team-role-access.tf index f35f242a5..bdbc88a93 100644 --- a/modules/aws-teams/policy-team-role-access.tf +++ b/modules/aws-teams/policy-team-role-access.tf @@ -8,6 +8,7 @@ data "aws_iam_policy_document" "team_role_access" { effect = "Allow" actions = [ "sts:AssumeRole", + "sts:SetSourceIdentity", "sts:TagSession", ] resources = [ @@ -21,15 +22,6 @@ data "aws_iam_policy_document" "team_role_access" { actions = ["sts:GetCallerIdentity"] resources = ["*"] } - - statement { - sid = "DenyIdentityAssumeRole" - effect = "Deny" - actions = ["sts:AssumeRole"] - resources = [ - format("arn:%s:iam::%s:role/*", local.aws_partition, local.identity_account_id), - ] - } } resource "aws_iam_policy" "team_role_access" { diff --git a/modules/aws-teams/remote-state.tf b/modules/aws-teams/remote-state.tf index 8672fe26d..bb8c8f1b3 100644 --- a/modules/aws-teams/remote-state.tf +++ b/modules/aws-teams/remote-state.tf @@ -1,6 +1,6 @@ module "aws_saml" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.0.0" + version = "1.5.0" component = "aws-saml" privileged = true @@ -16,13 +16,13 @@ module "aws_saml" { module "account_map" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.0.0" + version = "1.5.0" component = "account-map" - environment = var.account_map_environment_name - stage = var.account_map_stage_name + tenant = module.iam_roles.global_tenant_name + environment = module.iam_roles.global_environment_name + stage = module.iam_roles.global_stage_name privileged = true context = module.this.context } - diff --git a/modules/aws-teams/variables.tf b/modules/aws-teams/variables.tf index f50c1c513..026dd9465 100644 --- a/modules/aws-teams/variables.tf +++ b/modules/aws-teams/variables.tf @@ -13,6 +13,7 @@ variable "teams_config" { role_description = string role_policy_arns = list(string) aws_saml_login_enabled = bool + allowed_roles = optional(map(list(string)), {}) trusted_teams = list(string) trusted_permission_sets = list(string) trusted_role_arns = list(string) diff --git a/modules/aws-teams/versions.tf b/modules/aws-teams/versions.tf index cc73ffd35..2fdade250 100644 --- a/modules/aws-teams/versions.tf +++ b/modules/aws-teams/versions.tf @@ -6,5 +6,9 @@ terraform { source = "hashicorp/aws" version = ">= 4.9.0" } + local = { + source = "hashicorp/local" + version = ">= 1.3" + } } } diff --git a/modules/aws-waf-acl/default.auto.tfvars b/modules/aws-waf-acl/default.auto.tfvars deleted file mode 100644 index 6950c9724..000000000 --- a/modules/aws-waf-acl/default.auto.tfvars +++ /dev/null @@ -1,7 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false - -name = "waf" - -default_action = "allow" diff --git a/modules/aws-waf-acl/providers.tf b/modules/aws-waf-acl/providers.tf deleted file mode 100644 index c6e854450..000000000 --- a/modules/aws-waf-acl/providers.tf +++ /dev/null @@ -1,15 +0,0 @@ -provider "aws" { - region = var.region - profile = coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) -} - -module "iam_roles" { - source = "../account-map/modules/iam-roles" - context = module.this.context -} - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} diff --git 
a/modules/bastion/README.md b/modules/bastion/README.md index af2e8e8a0..07bb8f01f 100644 --- a/modules/bastion/README.md +++ b/modules/bastion/README.md @@ -1,10 +1,22 @@ +--- +tags: + - component/bastion + - layer/network + - provider/aws +--- + # Component: `bastion` -This component is responsible for provisioning a generic Bastion host within an ASG with parameterized `user_data` and support for AWS SSM Session Manager for remote access with IAM authentication. +This component is responsible for provisioning a generic Bastion host within an ASG with parameterized `user_data` and +support for AWS SSM Session Manager for remote access with IAM authentication. -If a special `container.sh` script is desired to run, set `container_enabled` to `true`, and set the `image_repository` and `image_container` variables. +If a special `container.sh` script is desired to run, set `container_enabled` to `true`, and set the `image_repository` +and `image_container` variables. -By default, this component acts as an "SSM Bastion", which is deployed to a private subnet and has SSM Enabled, allowing access via the AWS Console, AWS CLI, or SSM Session tools such as [aws-gate](https://github.com/xen0l/aws-gate). Alternatively, this component can be used as a regular SSH Bastion, deployed to a public subnet with Security Group Rules allowing inbound traffic over port 22. +By default, this component acts as an "SSM Bastion", which is deployed to a private subnet and has SSM Enabled, allowing +access via the AWS Console, AWS CLI, or SSM Session tools such as [aws-gate](https://github.com/xen0l/aws-gate). +Alternatively, this component can be used as a regular SSH Bastion, deployed to a public subnet with Security Group +Rules allowing inbound traffic over port 22. ## Usage @@ -19,6 +31,8 @@ components: vars: enabled: true name: bastion-ssm + # Your choice of availability zones. If not specified, all private subnets are used. 
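+ # Only subnets in the listed AZs are used; the names must match AZs of the subnets in the `vpc` component outputs.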
+ availability_zones: ["us-east-1a", "us-east-1b", "us-east-1c"] instance_type: t3.micro image_container: infrastructure:latest image_repository: "111111111111.dkr.ecr.us-east-1.amazonaws.com/example/infrastructure" @@ -39,43 +53,45 @@ components: custom_bastion_hostname: bastion vanity_domain: example.com security_group_rules: - - type : "ingress" - from_port : 22 - to_port : 22 - protocol : tcp - cidr_blocks : ["1.2.3.4/32"] - - type : "egress" - from_port : 0 - to_port : 0 - protocol : -1 - cidr_blocks : ["0.0.0.0/0"] + - type: "ingress" + from_port: 22 + to_port: 22 + protocol: tcp + cidr_blocks: ["1.2.3.4/32"] + - type: "egress" + from_port: 0 + to_port: 0 + protocol: -1 + cidr_blocks: ["0.0.0.0/0"] ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [cloudinit](#requirement\_cloudinit) | >= 2.2 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | -| [cloudinit](#provider\_cloudinit) | n/a | +| [aws](#provider\_aws) | >= 4.0 | +| [cloudinit](#provider\_cloudinit) | >= 2.2 | ## Modules | Name | Source | Version | |------|--------|---------| -| [bastion\_autoscale\_group](#module\_bastion\_autoscale\_group) | cloudposse/ec2-autoscale-group/aws | 0.30.1 | +| [bastion\_autoscale\_group](#module\_bastion\_autoscale\_group) | cloudposse/ec2-autoscale-group/aws | 0.35.1 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [sg](#module\_sg) | cloudposse/security-group/aws | 1.0.1 | +| [sg](#module\_sg) | cloudposse/security-group/aws | 2.2.0 | | [ssm\_tls\_ssh\_key\_pair](#module\_ssm\_tls\_ssh\_key\_pair) | cloudposse/ssm-tls-ssh-key-pair/aws | 0.10.2 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -96,20 +112,16 @@ components: | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | | [associate\_public\_ip\_address](#input\_associate\_public\_ip\_address) | Whether to associate public IP to the instance. | `bool` | `false` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [availability\_zones](#input\_availability\_zones) | AWS Availability Zones in which to deploy multi-AZ resources.
If not provided, resources will be provisioned in every private subnet in the VPC. | `list(string)` | `[]` | no | | [container\_command](#input\_container\_command) | The container command passed in after `docker run --rm -it bash -c`. | `string` | `"bash"` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | -| [custom\_bastion\_hostname](#input\_custom\_bastion\_hostname) | Hostname to assign with bastion instance | `string` | `null` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | -| [ebs\_block\_device\_volume\_size](#input\_ebs\_block\_device\_volume\_size) | The volume size (in GiB) to provision for the EBS block device. Creation skipped if size is 0 | `number` | `0` | no | -| [ebs\_delete\_on\_termination](#input\_ebs\_delete\_on\_termination) | Whether the EBS volume should be destroyed on instance termination | `bool` | `false` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | | [image\_container](#input\_image\_container) | The image container to use in `container.sh`. | `string` | `""` | no | | [image\_repository](#input\_image\_repository) | The image repository to use in `container.sh`. | `string` | `""` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [instance\_type](#input\_instance\_type) | Bastion instance type | `string` | `"t2.micro"` | no | | [kms\_alias\_name\_ssm](#input\_kms\_alias\_name\_ssm) | KMS alias name for SSM | `string` | `"alias/aws/ssm"` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | @@ -120,13 +132,10 @@ components: | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS region | `string` | n/a | yes | -| [root\_block\_device\_volume\_size](#input\_root\_block\_device\_volume\_size) | The volume size (in GiB) to provision for the root block device. It cannot be smaller than the AMI it refers to. | `number` | `8` | no | -| [security\_group\_rules](#input\_security\_group\_rules) | A list of maps of Security Group rules.
The values of map is fully complated with `aws_security_group_rule` resource.
To get more info see https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule . | `list(any)` |
[
{
"cidr_blocks": [
"0.0.0.0/0"
],
"from_port": 0,
"protocol": -1,
"to_port": 0,
"type": "egress"
},
{
"cidr_blocks": [
"0.0.0.0/0"
],
"from_port": 22,
"protocol": "tcp",
"to_port": 22,
"type": "ingress"
}
]
| no | +| [security\_group\_rules](#input\_security\_group\_rules) | A list of maps of Security Group rules.
The values of each map correspond to the arguments of the `aws_security_group_rule` resource.
For more information, see https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule . | `list(any)` |
[
{
"cidr_blocks": [
"0.0.0.0/0"
],
"from_port": 0,
"protocol": -1,
"to_port": 0,
"type": "egress"
},
{
"cidr_blocks": [
"0.0.0.0/0"
],
"from_port": 22,
"protocol": "tcp",
"to_port": 22,
"type": "ingress"
}
]
| no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | -| [user\_data](#input\_user\_data) | User data content | `list(string)` | `[]` | no | -| [vanity\_domain](#input\_vanity\_domain) | Vanity domain | `string` | `null` | no | ## Outputs @@ -136,8 +145,11 @@ components: | [iam\_instance\_profile](#output\_iam\_instance\_profile) | Name of AWS IAM Instance Profile | | [security\_group\_id](#output\_security\_group\_id) | ID on the AWS Security Group associated with the ASG | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/bastion) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/bastion) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/bastion/default.auto.tfvars b/modules/bastion/default.auto.tfvars deleted file mode 100644 index 47f94fb9b..000000000 --- a/modules/bastion/default.auto.tfvars +++ /dev/null @@ -1 +0,0 @@ -enabled = false diff --git a/modules/bastion/main.tf b/modules/bastion/main.tf index 3b39a74be..5b4bfa5f5 100644 --- a/modules/bastion/main.tf +++ b/modules/bastion/main.tf @@ -1,10 +1,13 @@ locals { - enabled = module.this.enabled - vpc_id = module.vpc.outputs.vpc_id - vpc_private_subnet_ids = module.vpc.outputs.private_subnet_ids - vpc_public_subnet_ids = module.vpc.outputs.public_subnet_ids + enabled = module.this.enabled + vpc_id = module.vpc.outputs.vpc_id + vpc_outputs = module.vpc.outputs + + # Get only the subnets that correspond to the AZs provided in `var.availability_zones` if set. + # `az_private_subnets_map` and `az_public_subnets_map` are a map of AZ names to list of subnet IDs in the AZs + vpc_private_subnet_ids = length(var.availability_zones) == 0 ? module.vpc.outputs.private_subnet_ids : flatten([for k, v in local.vpc_outputs.az_private_subnets_map : v if contains(var.availability_zones, k)]) + vpc_public_subnet_ids = length(var.availability_zones) == 0 ? module.vpc.outputs.public_subnet_ids : flatten([for k, v in local.vpc_outputs.az_public_subnets_map : v if contains(var.availability_zones, k)]) vpc_subnet_ids = var.associate_public_ip_address ? 
local.vpc_public_subnet_ids : local.vpc_private_subnet_ids - route52_enabled = var.associate_public_ip_address && var.custom_bastion_hostname != null && var.vanity_domain != null userdata_template = "${path.module}/templates/user-data.sh" container_template = "${path.module}/templates/container.sh" @@ -32,11 +35,10 @@ locals { module "sg" { source = "cloudposse/security-group/aws" - version = "1.0.1" + version = "2.2.0" - security_group_description = "Security group for Bastion Hosts" - allow_all_egress = true - vpc_id = local.vpc_id + rules = var.security_group_rules + vpc_id = local.vpc_id context = module.this.context } @@ -92,23 +94,23 @@ data "aws_ami" "bastion_image" { module "bastion_autoscale_group" { source = "cloudposse/ec2-autoscale-group/aws" - version = "0.30.1" + version = "0.35.1" - image_id = join("", data.aws_ami.bastion_image.*.id) + image_id = join("", data.aws_ami.bastion_image[*].id) instance_type = var.instance_type - subnet_ids = local.vpc_private_subnet_ids + subnet_ids = local.vpc_subnet_ids health_check_type = "EC2" min_size = 1 max_size = 2 default_cooldown = 300 scale_down_cooldown_seconds = 300 wait_for_capacity_timeout = "10m" - user_data_base64 = join("", data.cloudinit_config.config[0].*.rendered) + user_data_base64 = join("", data.cloudinit_config.config[0][*].rendered) tags = module.this.tags security_group_ids = [module.sg.id] - iam_instance_profile_name = join("", aws_iam_instance_profile.default.*.name) + iam_instance_profile_name = join("", aws_iam_instance_profile.default[*].name) block_device_mappings = [] - associate_public_ip_address = false + associate_public_ip_address = var.associate_public_ip_address # Auto-scaling policies and CloudWatch metric alarms autoscaling_policies_enabled = true diff --git a/modules/bastion/outputs.tf b/modules/bastion/outputs.tf index 545ae6333..b9753f8f3 100644 --- a/modules/bastion/outputs.tf +++ b/modules/bastion/outputs.tf @@ -4,7 +4,7 @@ output "autoscaling_group_id" { } output "iam_instance_profile" { - value = join("", aws_iam_instance_profile.default.*.name) + value = join("", aws_iam_instance_profile.default[*].name) description = "Name of AWS IAM Instance Profile" } diff --git a/modules/bastion/providers.tf b/modules/bastion/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/bastion/providers.tf +++ b/modules/bastion/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
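+ # compact() yields an empty list when the ARN is unset, so the assume_role block is rendered at most once.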
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/bastion/remote-state.tf b/modules/bastion/remote-state.tf index 1b1079219..757ef9067 100644 --- a/modules/bastion/remote-state.tf +++ b/modules/bastion/remote-state.tf @@ -1,6 +1,6 @@ module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = "vpc" diff --git a/modules/bastion/templates/user-data.sh b/modules/bastion/templates/user-data.sh index 741a38eca..3665a4aed 100644 --- a/modules/bastion/templates/user-data.sh +++ b/modules/bastion/templates/user-data.sh @@ -1,13 +1,5 @@ #!/bin/bash -# Mount additional volume -echo "Mounting additional volume..." -while [ ! -b $(readlink -f /dev/sdh) ]; do echo 'waiting for device /dev/sdh'; sleep 5 ; done -blkid $(readlink -f /dev/sdh) || mkfs -t ext4 $(readlink -f /dev/sdh) -e2label $(readlink -f /dev/sdh) sdh-volume -grep -q ^LABEL=sdh-volume /etc/fstab || echo 'LABEL=sdh-volume /mnt ext4 defaults' >> /etc/fstab -grep -q \"^$(readlink -f /dev/sdh) /mnt \" /proc/mounts || mount /mnt - # Install docker echo "Installing docker..." amazon-linux-extras install docker diff --git a/modules/bastion/variables.tf b/modules/bastion/variables.tf index 5e1444e65..f7cc2638a 100644 --- a/modules/bastion/variables.tf +++ b/modules/bastion/variables.tf @@ -3,42 +3,27 @@ variable "region" { description = "AWS region" } +variable "availability_zones" { + type = list(string) + description = <<-EOT + AWS Availability Zones in which to deploy multi-AZ resources. + If not provided, resources will be provisioned in every private subnet in the VPC. + EOT + default = [] +} + variable "instance_type" { type = string default = "t2.micro" description = "Bastion instance type" } -variable "root_block_device_volume_size" { - type = number - default = 8 - description = "The volume size (in GiB) to provision for the root block device. It cannot be smaller than the AMI it refers to." -} - variable "associate_public_ip_address" { type = bool default = false description = "Whether to associate public IP to the instance." } -variable "ebs_block_device_volume_size" { - type = number - default = 0 - description = "The volume size (in GiB) to provision for the EBS block device. Creation skipped if size is 0" -} - -variable "ebs_delete_on_termination" { - type = bool - default = false - description = "Whether the EBS volume should be destroyed on instance termination" -} - -variable "user_data" { - type = list(string) - default = [] - description = "User data content" -} - variable "security_group_rules" { type = list(any) default = [ @@ -59,25 +44,14 @@ variable "security_group_rules" { ] description = <<-EOT A list of maps of Security Group rules. - The values of map is fully complated with `aws_security_group_rule` resource. + The values of map is fully completed with `aws_security_group_rule` resource. To get more info see https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule . 
EOT } -variable "custom_bastion_hostname" { - type = string - default = null - description = "Hostname to assign with bastion instance" -} - -variable "vanity_domain" { - type = string - default = null - description = "Vanity domain" -} - # AWS KMS alias used for encryption/decryption of SSM secure strings variable "kms_alias_name_ssm" { + type = string default = "alias/aws/ssm" description = "KMS alias name for SSM" } diff --git a/modules/bastion/versions.tf b/modules/bastion/versions.tf index e89eb16ed..6b9a92762 100644 --- a/modules/bastion/versions.tf +++ b/modules/bastion/versions.tf @@ -4,7 +4,11 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" + } + cloudinit = { + source = "hashicorp/cloudinit" + version = ">= 2.2" } } } diff --git a/modules/cloudmap-namespace/context.tf b/modules/cloudmap-namespace/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/cloudmap-namespace/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/cloudmap-namespace/main.tf b/modules/cloudmap-namespace/main.tf new file mode 100644 index 000000000..4cd7cbc21 --- /dev/null +++ b/modules/cloudmap-namespace/main.tf @@ -0,0 +1,22 @@ +locals { + enabled = module.this.enabled +} + +resource "aws_service_discovery_private_dns_namespace" "default" { + count = local.enabled && var.type == "private" ? 1 : 0 + name = module.this.id + description = var.description + vpc = module.vpc.outputs.vpc_id +} + +resource "aws_service_discovery_public_dns_namespace" "default" { + count = local.enabled && var.type == "public" ? 1 : 0 + name = module.this.id + description = var.description +} + +resource "aws_service_discovery_http_namespace" "default" { + count = local.enabled && var.type == "http" ? 1 : 0 + name = module.this.id + description = var.description +} diff --git a/modules/cloudmap-namespace/outputs.tf b/modules/cloudmap-namespace/outputs.tf new file mode 100644 index 000000000..40210a4e4 --- /dev/null +++ b/modules/cloudmap-namespace/outputs.tf @@ -0,0 +1,14 @@ +output "name" { + value = module.this.id + description = "The name of the namespace" +} + +output "id" { + value = coalesce(one(aws_service_discovery_http_namespace.default[*].id), one(aws_service_discovery_private_dns_namespace.default[*].id), one(aws_service_discovery_public_dns_namespace.default[*].id)) + description = "The ID of the namespace" +} + +output "arn" { + value = coalesce(one(aws_service_discovery_http_namespace.default[*].arn), one(aws_service_discovery_private_dns_namespace.default[*].arn), one(aws_service_discovery_public_dns_namespace.default[*].arn)) + description = "The ARN of the namespace" +} diff --git a/modules/cloudmap-namespace/providers.tf b/modules/cloudmap-namespace/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/cloudmap-namespace/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. 
+ profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/cloudmap-namespace/remote-state.tf b/modules/cloudmap-namespace/remote-state.tf new file mode 100644 index 000000000..757ef9067 --- /dev/null +++ b/modules/cloudmap-namespace/remote-state.tf @@ -0,0 +1,8 @@ +module "vpc" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "vpc" + + context = module.this.context +} diff --git a/modules/cloudmap-namespace/variables.tf b/modules/cloudmap-namespace/variables.tf new file mode 100644 index 000000000..c127b391b --- /dev/null +++ b/modules/cloudmap-namespace/variables.tf @@ -0,0 +1,19 @@ +variable "region" { + type = string + description = "AWS region" +} + +variable "description" { + type = string + description = "Description of the Cloud Map Namespace" +} + +variable "type" { + type = string + description = "Type of the Cloud Map Namespace" + default = "http" + validation { + condition = contains(["http", "private", "public"], var.type) + error_message = "Invalid namespace type, must be one of `http` or `private` or `public`" + } +} diff --git a/modules/cloudmap-namespace/versions.tf b/modules/cloudmap-namespace/versions.tf new file mode 100644 index 000000000..f33ede77f --- /dev/null +++ b/modules/cloudmap-namespace/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + } +} diff --git a/modules/cloudtrail-bucket/README.md b/modules/cloudtrail-bucket/README.md index 150c0c069..817b955b1 100644 --- a/modules/cloudtrail-bucket/README.md +++ b/modules/cloudtrail-bucket/README.md @@ -1,12 +1,22 @@ +--- +tags: + - component/cloudtrail-bucket + - layer/foundation + - provider/aws +--- + # Component: `cloudtrail-bucket` -This component is responsible for provisioning a bucket for storing cloudtrail logs for auditing purposes. It's expected to be used alongside [the `cloudtrail` component](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/cloudtrail). +This component is responsible for provisioning a bucket for storing cloudtrail logs for auditing purposes. It's expected +to be used alongside +[the `cloudtrail` component](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/cloudtrail). ## Usage **Stack Level**: Regional -Here's an example snippet for how to use this component. It's suggested to apply this component to only the centralized `audit` account. +Here's an example snippet for how to use this component. It's suggested to apply this component to only the centralized +`audit` account. ```yaml components: @@ -22,6 +32,7 @@ components: expiration_days: 365 ``` + ## Requirements @@ -38,7 +49,7 @@ No providers. | Name | Source | Version | |------|--------|---------| -| [cloudtrail\_s3\_bucket](#module\_cloudtrail\_s3\_bucket) | cloudposse/cloudtrail-s3-bucket/aws | 0.23.1 | +| [cloudtrail\_s3\_bucket](#module\_cloudtrail\_s3\_bucket) | cloudposse/cloudtrail-s3-bucket/aws | 0.26.1 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -51,6 +62,7 @@ No resources. 
| Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | [access\_log\_bucket\_name](#input\_access\_log\_bucket\_name) | If var.create\_access\_log\_bucket is false, this is the name of the S3 bucket where s3 access logs will be sent to. | `string` | `""` | no | +| [acl](#input\_acl) | The canned ACL to apply. We recommend log-delivery-write for
compatibility with AWS services. Valid values are private, public-read,
public-read-write, aws-exec-read, authenticated-read, bucket-owner-read,
bucket-owner-full-control, log-delivery-write.

Due to https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-bucket-faq.html, this
will need to be set to 'private' during creation, but you can update normally after. | `string` | `"log-delivery-write"` | no | | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | @@ -62,8 +74,6 @@ No resources. | [expiration\_days](#input\_expiration\_days) | Number of days after which to expunge the objects | `number` | `90` | no | | [glacier\_transition\_days](#input\_glacier\_transition\_days) | Number of days after which to move the data to the glacier storage tier | `number` | `60` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -72,7 +82,7 @@ No resources. | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | | [noncurrent\_version\_expiration\_days](#input\_noncurrent\_version\_expiration\_days) | Specifies when noncurrent object versions expire | `number` | `90` | no | -| [noncurrent\_version\_transition\_days](#input\_noncurrent\_version\_transition\_days) | Specifies when noncurrent object versions transitions | `number` | `30` | no | +| [noncurrent\_version\_transition\_days](#input\_noncurrent\_version\_transition\_days) | Specifies when noncurrent object versions transition to a different storage tier | `number` | `30` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | @@ -88,9 +98,11 @@ No resources. | [cloudtrail\_bucket\_domain\_name](#output\_cloudtrail\_bucket\_domain\_name) | CloudTrail S3 bucket domain name | | [cloudtrail\_bucket\_id](#output\_cloudtrail\_bucket\_id) | CloudTrail S3 bucket ID | + ## References -- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/cloudtrail-bucket) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/cloudtrail-bucket) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/cloudtrail-bucket/main.tf b/modules/cloudtrail-bucket/main.tf index 805d13c6e..1f7f89450 100644 --- a/modules/cloudtrail-bucket/main.tf +++ b/modules/cloudtrail-bucket/main.tf @@ -1,7 +1,8 @@ module "cloudtrail_s3_bucket" { source = "cloudposse/cloudtrail-s3-bucket/aws" - version = "0.23.1" + version = "0.26.1" + acl = var.acl expiration_days = var.expiration_days force_destroy = false glacier_transition_days = var.glacier_transition_days diff --git a/modules/cloudtrail-bucket/providers.tf b/modules/cloudtrail-bucket/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/cloudtrail-bucket/providers.tf +++ b/modules/cloudtrail-bucket/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/cloudtrail-bucket/variables.tf b/modules/cloudtrail-bucket/variables.tf index cff8e964f..479c1e91c 100644 --- a/modules/cloudtrail-bucket/variables.tf +++ b/modules/cloudtrail-bucket/variables.tf @@ -10,28 +10,33 @@ variable "lifecycle_rule_enabled" { } variable "noncurrent_version_expiration_days" { - description = "Specifies when noncurrent object versions expire" + type = number default = 90 + description = "Specifies when noncurrent object versions expire" } variable "noncurrent_version_transition_days" { - description = "Specifies when noncurrent object versions transitions" + type = number default = 30 + description = "Specifies when noncurrent object versions transition to a different storage tier" } variable "standard_transition_days" { - description = "Number of days to persist in the standard storage tier before moving to the infrequent access tier" + type = number default = 30 + description = "Number of days to persist in the standard storage tier before moving to the infrequent access tier" } variable "glacier_transition_days" { - description = "Number of days after which to move the data to the glacier storage tier" + type = number default = 60 + description = "Number of days after which to move the data to the glacier storage tier" } variable "expiration_days" { - description = "Number of days after which to expunge the objects" + type = number default = 90 + description = "Number of days after which to expunge the objects" } variable "create_access_log_bucket" { @@ -45,3 +50,17 @@ variable "access_log_bucket_name" { default = "" description = "If var.create_access_log_bucket is false, this is the name of the S3 bucket where s3 access logs will be sent to." } + +variable "acl" { + type = string + description = <<-EOT + The canned ACL to apply. We recommend log-delivery-write for + compatibility with AWS services. Valid values are private, public-read, + public-read-write, aws-exec-read, authenticated-read, bucket-owner-read, + bucket-owner-full-control, log-delivery-write. + + Due to https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-bucket-faq.html, this + will need to be set to 'private' during creation, but you can update normally after. + EOT + default = "log-delivery-write" +} diff --git a/modules/cloudtrail/README.md b/modules/cloudtrail/README.md index adc078cec..b42770ebf 100644 --- a/modules/cloudtrail/README.md +++ b/modules/cloudtrail/README.md @@ -1,11 +1,19 @@ +--- +tags: + - component/cloudtrail + - layer/foundation + - provider/aws +--- + # Component: `cloudtrail` -This component is responsible for provisioning cloudtrail auditing in an individual account. It's expected to be used alongside -[the `cloudtrail-bucket` component](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/cloudtrail-bucket) +This component is responsible for provisioning cloudtrail auditing in an individual account. 
It's expected to be used +alongside +[the `cloudtrail-bucket` component](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/cloudtrail-bucket) as it utilizes that bucket via remote state. -This component can either be deployed selectively to various accounts with `is_organization_trail=false`, or alternatively -created in all accounts if deployed to the management account `is_organization_trail=true`. +This component can either be deployed selectively to various accounts with `is_organization_trail=false`, or +alternatively created in all accounts if deployed to the management account `is_organization_trail=true`. ## Usage @@ -27,6 +35,7 @@ components: is_organization_trail: true ``` + ## Requirements @@ -45,8 +54,9 @@ components: | Name | Source | Version | |------|--------|---------| +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [cloudtrail](#module\_cloudtrail) | cloudposse/cloudtrail/aws | 0.21.0 | -| [cloudtrail\_bucket](#module\_cloudtrail\_bucket) | cloudposse/stack-config/yaml//modules/remote-state | 1.1.0 | +| [cloudtrail\_bucket](#module\_cloudtrail\_bucket) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [kms\_key\_cloudtrail](#module\_kms\_key\_cloudtrail) | cloudposse/kms-key/aws | 0.12.1 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -71,6 +81,8 @@ components: |------|-------------|------|---------|:--------:| | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [audit\_access\_enabled](#input\_audit\_access\_enabled) | If `true`, allows the Audit account access to read Cloudtrail logs directly from S3. This is a requirement for running Athena queries in the Audit account. | `bool` | `false` | no | +| [cloudtrail\_bucket\_component\_name](#input\_cloudtrail\_bucket\_component\_name) | The name of the CloudTrail bucket component | `string` | `"cloudtrail-bucket"` | no | | [cloudtrail\_bucket\_environment\_name](#input\_cloudtrail\_bucket\_environment\_name) | The name of the environment where the CloudTrail bucket is provisioned | `string` | n/a | yes | | [cloudtrail\_bucket\_stage\_name](#input\_cloudtrail\_bucket\_stage\_name) | The stage name where the CloudTrail bucket is provisioned | `string` | n/a | yes | | [cloudtrail\_cloudwatch\_logs\_role\_max\_session\_duration](#input\_cloudtrail\_cloudwatch\_logs\_role\_max\_session\_duration) | The maximum session duration (in seconds) for the CloudTrail CloudWatch Logs role. Can have a value from 1 hour to 12 hours | `number` | `43200` | no | @@ -83,8 +95,6 @@ components: | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.<br/>
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [include\_global\_service\_events](#input\_include\_global\_service\_events) | Specifies whether the trail is publishing events from global services such as IAM to the log files | `bool` | `true` | no | | [is\_multi\_region\_trail](#input\_is\_multi\_region\_trail) | Specifies whether the trail is created in the current region or in all regions | `bool` | `true` | no | | [is\_organization\_trail](#input\_is\_organization\_trail) | Specifies whether the trail is created for all accounts in an organization in AWS Organizations, or only for the current AWS account.

The default is false, and cannot be true unless the call is made on behalf of an AWS account that is the management account
for an organization in AWS Organizations. | `bool` | `false` | no | @@ -112,9 +122,11 @@ components: | [cloudtrail\_logs\_role\_arn](#output\_cloudtrail\_logs\_role\_arn) | CloudTrail Logs role ARN | | [cloudtrail\_logs\_role\_name](#output\_cloudtrail\_logs\_role\_name) | CloudTrail Logs role name | + ## References -- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/cloudtrail) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/cloudtrail) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/cloudtrail/cloudtrail-kms-key.tf b/modules/cloudtrail/cloudtrail-kms-key.tf index bebc60e60..693cf94f9 100644 --- a/modules/cloudtrail/cloudtrail-kms-key.tf +++ b/modules/cloudtrail/cloudtrail-kms-key.tf @@ -1,3 +1,8 @@ +locals { + audit_access_enabled = module.this.enabled && var.audit_access_enabled + audit_account_id = module.account_map.outputs.full_account_map[module.account_map.outputs.audit_account_account_name] +} + module "kms_key_cloudtrail" { source = "cloudposse/kms-key/aws" version = "0.12.1" @@ -72,4 +77,24 @@ data "aws_iam_policy_document" "kms_key_cloudtrail" { ] } } + + dynamic "statement" { + for_each = local.audit_access_enabled ? [1] : [] + content { + sid = "Allow Audit to decrypt with the KMS key" + effect = "Allow" + actions = [ + "kms:Decrypt*", + ] + resources = [ + "*" + ] + principals { + type = "AWS" + identifiers = [ + format("arn:${join("", data.aws_partition.current[*].partition)}:iam::%s:root", local.audit_account_id) + ] + } + } + } } diff --git a/modules/cloudtrail/providers.tf b/modules/cloudtrail/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/cloudtrail/providers.tf +++ b/modules/cloudtrail/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/cloudtrail/remote-state.tf b/modules/cloudtrail/remote-state.tf index 590119fc5..78a54796b 100644 --- a/modules/cloudtrail/remote-state.tf +++ b/modules/cloudtrail/remote-state.tf @@ -1,10 +1,22 @@ module "cloudtrail_bucket" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.1.0" + version = "1.5.0" - component = "cloudtrail-bucket" + component = var.cloudtrail_bucket_component_name environment = var.cloudtrail_bucket_environment_name stage = var.cloudtrail_bucket_stage_name context = module.this.context } + +module "account_map" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "account-map" + tenant = module.iam_roles.global_tenant_name + environment = module.iam_roles.global_environment_name + stage = module.iam_roles.global_stage_name + + context = module.this.context +} diff --git a/modules/cloudtrail/variables.tf b/modules/cloudtrail/variables.tf index b718d2cb4..889cf1db5 100644 --- a/modules/cloudtrail/variables.tf +++ b/modules/cloudtrail/variables.tf @@ -39,6 +39,12 @@ variable "cloudtrail_cloudwatch_logs_role_max_session_duration" { description = "The maximum session duration (in seconds) for the CloudTrail CloudWatch Logs role. Can have a value from 1 hour to 12 hours" } +variable "cloudtrail_bucket_component_name" { + type = string + description = "The name of the CloudTrail bucket component" + default = "cloudtrail-bucket" +} + variable "cloudtrail_bucket_environment_name" { type = string description = "The name of the environment where the CloudTrail bucket is provisioned" @@ -59,3 +65,9 @@ variable "is_organization_trail" { for an organization in AWS Organizations. EOT } + +variable "audit_access_enabled" { + type = bool + default = false + description = "If `true`, allows the Audit account access to read Cloudtrail logs directly from S3. This is a requirement for running Athena queries in the Audit account." +} diff --git a/modules/cloudwatch-logs/README.md b/modules/cloudwatch-logs/README.md index 11e816db1..e1244f1b9 100644 --- a/modules/cloudwatch-logs/README.md +++ b/modules/cloudwatch-logs/README.md @@ -1,3 +1,11 @@ +--- +tags: + - component/cloudwatch-logs + - layer/baseline + - layer/security-and-compliance + - provider/aws +--- + # Component: `cloudwatch-logs` This component is responsible for creation of CloudWatch Log Streams and Log Groups. @@ -21,6 +29,7 @@ components: - app-2 ``` + ## Requirements @@ -65,8 +74,6 @@ components: | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.<br/>
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -92,8 +99,11 @@ components: | [role\_name](#output\_role\_name) | Name of role to assume | | [stream\_arns](#output\_stream\_arns) | ARN of the log stream | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/cloudwatch-logs) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/cloudwatch-logs) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/cloudwatch-logs/default.auto.tfvars b/modules/cloudwatch-logs/default.auto.tfvars deleted file mode 100644 index 47f94fb9b..000000000 --- a/modules/cloudwatch-logs/default.auto.tfvars +++ /dev/null @@ -1 +0,0 @@ -enabled = false diff --git a/modules/cloudwatch-logs/providers.tf b/modules/cloudwatch-logs/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/cloudwatch-logs/providers.tf +++ b/modules/cloudwatch-logs/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/cognito/README.md b/modules/cognito/README.md index 1531e3d54..0b29c148b 100644 --- a/modules/cognito/README.md +++ b/modules/cognito/README.md @@ -1,16 +1,22 @@ +--- +tags: + - component/cognito + - layer/addons + - provider/aws +--- + # Component: `cognito` This component is responsible for provisioning and managing AWS Cognito resources. 
This component can provision the following resources: - - [Cognito User Pools](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html) - - [Cognito User Pool Clients](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-client-apps.html) - - [Cognito User Pool Domains](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-add-custom-domain.html) - - [Cognito User Pool Identity Providers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-identity-provider.html) - - [Cognito User Pool Resource Servers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-define-resource-servers.html) - - [Cognito User Pool User Groups](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-user-groups.html) - +- [Cognito User Pools](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html) +- [Cognito User Pool Clients](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-client-apps.html) +- [Cognito User Pool Domains](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-add-custom-domain.html) +- [Cognito User Pool Identity Providers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-identity-provider.html) +- [Cognito User Pool Resource Servers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-define-resource-servers.html) +- [Cognito User Pool User Groups](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-user-groups.html) ## Usage @@ -37,6 +43,7 @@ components: required: true ``` + ## Requirements @@ -101,6 +108,7 @@ components: | [client\_write\_attributes](#input\_client\_write\_attributes) | List of user pool attributes the application client can write to | `list(string)` | `[]` | no | | [clients](#input\_clients) | User Pool clients configuration | `any` | `[]` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [deletion\_protection](#input\_deletion\_protection) | (Optional) When active, DeletionProtection prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature. Valid values are ACTIVE and INACTIVE, Default value is INACTIVE. | `string` | `"INACTIVE"` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [device\_configuration](#input\_device\_configuration) | The configuration for the user pool's device tracking | `map(any)` | `{}` | no | @@ -119,7 +127,6 @@ components: | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.<br/>
Does not affect `id_full`. | `number` | `null` | no | | [identity\_providers](#input\_identity\_providers) | Cognito Identity Providers configuration | `list(any)` | `[]` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -204,9 +211,11 @@ components: | [last\_modified\_date](#output\_last\_modified\_date) | The date the User Pool was last modified | | [resource\_servers\_scope\_identifiers](#output\_resource\_servers\_scope\_identifiers) | A list of all scopes configured in the format identifier/scope\_name | - + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/cognito) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/cognito) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/cognito/main.tf b/modules/cognito/main.tf index bf05fd81c..aec78d656 100644 --- a/modules/cognito/main.tf +++ b/modules/cognito/main.tf @@ -149,6 +149,7 @@ resource "aws_cognito_user_pool" "pool" { mfa_configuration = var.mfa_configuration sms_authentication_message = var.sms_authentication_message sms_verification_message = var.sms_verification_message + deletion_protection = var.deletion_protection dynamic "username_configuration" { for_each = local.username_configuration diff --git a/modules/cognito/providers.tf b/modules/cognito/providers.tf index de2e8a327..ef923e10a 100644 --- a/modules/cognito/providers.tf +++ b/modules/cognito/providers.tf @@ -1,16 +1,19 @@ provider "aws" { region = var.region - profile = coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } } module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile to use when importing a resource" -} diff --git a/modules/cognito/variables.tf b/modules/cognito/variables.tf index 5dfc5671c..687cd36de 100644 --- a/modules/cognito/variables.tf +++ b/modules/cognito/variables.tf @@ -559,3 +559,9 @@ variable "identity_providers" { type = list(any) default = [] } + +variable "deletion_protection" { + description = "(Optional) When active, DeletionProtection prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature. Valid values are ACTIVE and INACTIVE, Default value is INACTIVE." + type = string + default = "INACTIVE" +} diff --git a/modules/config-bucket/README.md b/modules/config-bucket/README.md new file mode 100644 index 000000000..0c3371a86 --- /dev/null +++ b/modules/config-bucket/README.md @@ -0,0 +1,115 @@ +--- +tags: + - component/config-bucket + - layer/security-and-compliance + - provider/aws +--- + +# Component: `config-bucket` + +This module creates an S3 bucket suitable for storing `AWS Config` data. + +It implements a configurable log retention policy, which allows you to efficiently manage logs across different storage +classes (_e.g._ `Glacier`) and ultimately expire the data altogether. + +It enables server-side encryption by default. 
+https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html + +It blocks public access to the bucket by default. +https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. It's suggested to apply this component to only the centralized +`audit` account. + +```yaml +components: + terraform: + config-bucket: + vars: + enabled: true + name: "config" + noncurrent_version_expiration_days: 180 + noncurrent_version_transition_days: 30 + standard_transition_days: 60 + glacier_transition_days: 180 + expiration_days: 365 +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [config\_bucket](#module\_config\_bucket) | cloudposse/config-storage/aws | 1.0.2 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [access\_log\_bucket\_name](#input\_access\_log\_bucket\_name) | Name of the S3 bucket where s3 access log will be sent to | `string` | `""` | no | +| [acl](#input\_acl) | The canned ACL to apply. We recommend log-delivery-write for compatibility with AWS services | `string` | `"log-delivery-write"` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enable\_glacier\_transition](#input\_enable\_glacier\_transition) | Enables the transition to AWS Glacier (note that this can incur unnecessary costs for huge amount of small files | `bool` | `true` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [expiration\_days](#input\_expiration\_days) | Number of days after which to expunge the objects | `number` | `90` | no | +| [glacier\_transition\_days](#input\_glacier\_transition\_days) | Number of days after which to move the data to the glacier storage tier | `number` | `60` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.<br/>
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [lifecycle\_rule\_enabled](#input\_lifecycle\_rule\_enabled) | Enable lifecycle events on this bucket | `bool` | `true` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [noncurrent\_version\_expiration\_days](#input\_noncurrent\_version\_expiration\_days) | Specifies when noncurrent object versions expire | `number` | `90` | no | +| [noncurrent\_version\_transition\_days](#input\_noncurrent\_version\_transition\_days) | Specifies when noncurrent object versions transition to a different storage tier | `number` | `30` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [standard\_transition\_days](#input\_standard\_transition\_days) | Number of days to persist in the standard storage tier before moving to the infrequent access tier | `number` | `30` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [config\_bucket\_arn](#output\_config\_bucket\_arn) | Config bucket ARN | +| [config\_bucket\_domain\_name](#output\_config\_bucket\_domain\_name) | Config bucket FQDN | +| [config\_bucket\_id](#output\_config\_bucket\_id) | Config bucket ID | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/config-bucket) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/config-bucket/context.tf b/modules/config-bucket/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/config-bucket/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
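+# Resources can likewise use the module's merged outputs, for example `tags = module.this.tags`.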
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/config-bucket/main.tf b/modules/config-bucket/main.tf new file mode 100644 index 000000000..20f09eb32 --- /dev/null +++ b/modules/config-bucket/main.tf @@ -0,0 +1,16 @@ +module "config_bucket" { + source = "cloudposse/config-storage/aws" + version = "1.0.2" + + expiration_days = var.expiration_days + force_destroy = false + glacier_transition_days = var.glacier_transition_days + lifecycle_rule_enabled = var.lifecycle_rule_enabled + noncurrent_version_expiration_days = var.noncurrent_version_expiration_days + noncurrent_version_transition_days = var.noncurrent_version_transition_days + sse_algorithm = "AES256" + standard_transition_days = var.standard_transition_days + access_log_bucket_name = var.access_log_bucket_name + + context = module.this.context +} diff --git a/modules/config-bucket/outputs.tf b/modules/config-bucket/outputs.tf new file mode 100644 index 000000000..6f926cba5 --- /dev/null +++ b/modules/config-bucket/outputs.tf @@ -0,0 +1,14 @@ +output "config_bucket_domain_name" { + value = module.config_bucket.bucket_domain_name + description = "Config bucket FQDN" +} + +output "config_bucket_id" { + value = module.config_bucket.bucket_id + description = "Config bucket ID" +} + +output "config_bucket_arn" { + value = module.config_bucket.bucket_arn + description = "Config bucket ARN" +} diff --git a/modules/config-bucket/providers.tf b/modules/config-bucket/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/config-bucket/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/config-bucket/variables.tf b/modules/config-bucket/variables.tf new file mode 100644 index 000000000..0a5ff0a12 --- /dev/null +++ b/modules/config-bucket/variables.tf @@ -0,0 +1,58 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "lifecycle_rule_enabled" { + type = bool + description = "Enable lifecycle events on this bucket" + default = true +} + +variable "noncurrent_version_expiration_days" { + type = number + default = 90 + description = "Specifies when noncurrent object versions expire" +} + +variable "noncurrent_version_transition_days" { + type = number + default = 30 + description = "Specifies when noncurrent object versions transition to a different storage tier" +} + +variable "standard_transition_days" { + type = number + default = 30 + description = "Number of days to persist in the standard storage tier before moving to the infrequent access tier" +} + +variable "glacier_transition_days" { + type = number + default = 60 + description = "Number of days after which to move the data to the glacier storage tier" +} + +variable "enable_glacier_transition" { + type = bool + default = true + description = "Enables the transition to AWS Glacier (note that this can incur unnecessary costs for huge amount of small files" +} + +variable "expiration_days" { + type = number + default = 90 + description = "Number of days after which to expunge the objects" +} + +variable "access_log_bucket_name" { + type = string + default = "" + description = "Name of the S3 bucket where s3 access log will be sent to" +} + +variable "acl" { + type = string + description = "The canned ACL to apply. We recommend log-delivery-write for compatibility with AWS services" + default = "log-delivery-write" +} diff --git a/modules/config-bucket/versions.tf b/modules/config-bucket/versions.tf new file mode 100644 index 000000000..cc73ffd35 --- /dev/null +++ b/modules/config-bucket/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.9.0" + } + } +} diff --git a/modules/datadog-agent/asm.tf b/modules/datadog-agent/asm.tf deleted file mode 100644 index a4e64b0b6..000000000 --- a/modules/datadog-agent/asm.tf +++ /dev/null @@ -1,19 +0,0 @@ -data "aws_secretsmanager_secret" "datadog_api_key" { - count = local.enabled && var.secrets_store_type == "ASM" ? 1 : 0 - name = var.datadog_api_secret_key -} - -data "aws_secretsmanager_secret_version" "datadog_api_key" { - count = local.enabled && var.secrets_store_type == "ASM" ? 1 : 0 - secret_id = data.aws_secretsmanager_secret.datadog_api_key[0].id -} - -data "aws_secretsmanager_secret" "datadog_app_key" { - count = local.enabled && var.secrets_store_type == "ASM" ? 1 : 0 - name = var.datadog_app_secret_key -} - -data "aws_secretsmanager_secret_version" "datadog_app_key" { - count = local.enabled && var.secrets_store_type == "ASM" ? 1 : 0 - secret_id = data.aws_secretsmanager_secret.datadog_app_key[0].id -} diff --git a/modules/datadog-agent/ssm.tf b/modules/datadog-agent/ssm.tf deleted file mode 100644 index a78029cb9..000000000 --- a/modules/datadog-agent/ssm.tf +++ /dev/null @@ -1,11 +0,0 @@ -data "aws_ssm_parameter" "datadog_api_key" { - count = local.enabled && var.secrets_store_type == "SSM" ? 
1 : 0 - name = format("/%s", var.datadog_api_secret_key) - with_decryption = true -} - -data "aws_ssm_parameter" "datadog_app_key" { - count = local.enabled && var.secrets_store_type == "SSM" ? 1 : 0 - name = format("/%s", var.datadog_app_secret_key) - with_decryption = true -} diff --git a/modules/datadog-agent/values.yaml b/modules/datadog-agent/values.yaml deleted file mode 100644 index 8c64186ec..000000000 --- a/modules/datadog-agent/values.yaml +++ /dev/null @@ -1,62 +0,0 @@ -datadog: - logLevel: INFO - kubeStateMetricsEnabled: false - kubeStateMetricsCore: - enabled: true - collectEvents: true - leaderElection: true - logs: - enabled: true - containerCollectAll: true - containerCollectUsingFiles: true - apm: - enabled: false - processAgent: - enabled: true - processCollection: true - systemProbe: - enableTCPQueueLength: true - enableOOMKill: true - collectDNSStats: true - enableConntrack: true - bpfDebug: false - networkMonitoring: - enabled: true - clusterChecksRunner: - enabled: false - clusterChecks: - enabled: true - dogstatsd: - useHostPort: true - nonLocalTraffic: true - securityAgent: - runtime: - enabled: true - compliance: - enabled: true - helmCheck: - enabled: true - collectEvents: true -clusterAgent: - enabled: true - replicas: 1 - metricsProvider: - enabled: false - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 300m - memory: 512Mi -agents: - priorityClassName: "system-node-critical" - tolerations: - - effect: NoSchedule - operator: Exists - - effect: NoExecute - operator: Exists -# Per https://github.com/DataDog/helm-charts/blob/main/charts/datadog/README.md#configuration-required-for-amazon-linux-2-based-nodes - podSecurity: - apparmor: - enabled: false diff --git a/modules/datadog-configuration/README.md b/modules/datadog-configuration/README.md new file mode 100644 index 000000000..a9673333b --- /dev/null +++ b/modules/datadog-configuration/README.md @@ -0,0 +1,163 @@ +--- +tags: + - component/datadog-configuration + - layer/datadog + - provider/datadog + - provider/aws +--- + +# Component: `datadog-configuration` + +This component is responsible for provisioning SSM or ASM entries for Datadog API keys. + +It's required that the DataDog API and APP secret keys are available in the `var.datadog_secrets_source_store_account` +account in AWS SSM Parameter Store at the `/datadog/%v/datadog_app_key` paths (where `%v` are the corresponding account +names). + +This component copies keys from the source account (e.g. `auto`) to the destination account where this is being +deployed. The purpose of using this formatted copying of keys handles a couple of problems. + +1. The keys are needed in each account where datadog resources will be deployed. +1. The keys might need to be different per account or tenant, or any subset of accounts. +1. If the keys need to be rotated they can be rotated from a single management account. + +This module also has a submodule which allows other resources to quickly use it to create a datadog provider. + +See Datadog's [documentation about provisioning keys](https://docs.datadoghq.com/account_management/api-app-keys) for +more information. + +## Usage + +**Stack Level**: Global + +This component should be deployed to every account where you want to provision datadog resources. This is usually every +account except `root` and `identity` + +Here's an example snippet for how to use this component. It's suggested to apply this component to all accounts which +you want to track AWS metrics with DataDog. 
In this example we use the key paths `/datadog/%v/datadog_api_key` and +`/datadog/%v/datadog_app_key` where `%v` is `default`, this can be changed through `datadog_app_secret_key` & +`datadog_api_secret_key` variables. The output Keys in the deployed account will be `/datadog/datadog_api_key` and +`/datadog/datadog_app_key`. + +```yaml +components: + terraform: + datadog-configuration: + settings: + spacelift: + workspace_enabled: true + vars: + enabled: true + name: datadog-configuration + datadog_secrets_store_type: SSM + datadog_secrets_source_store_account_stage: auto + datadog_secrets_source_store_account_region: "us-east-2" +``` + +Here is a snippet of using the `datadog_keys` submodule: + +```terraform +module "datadog_configuration" { + source = "../datadog-configuration/modules/datadog_keys" + enabled = true + context = module.this.context +} + +provider "datadog" { + api_key = module.datadog_configuration.datadog_api_key + app_key = module.datadog_configuration.datadog_app_key + api_url = module.datadog_configuration.datadog_api_url + validate = local.enabled +} +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws.api\_keys](#provider\_aws.api\_keys) | >= 4.9.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [iam\_roles\_datadog\_secrets](#module\_iam\_roles\_datadog\_secrets) | ../account-map/modules/iam-roles | n/a | +| [store\_write](#module\_store\_write) | cloudposse/ssm-parameter-store/aws | 0.10.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_secretsmanager_secret.datadog_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret) | data source | +| [aws_secretsmanager_secret.datadog_app_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret) | data source | +| [aws_secretsmanager_secret_version.datadog_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret_version) | data source | +| [aws_secretsmanager_secret_version.datadog_app_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret_version) | data source | +| [aws_ssm_parameter.datadog_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.datadog_app_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [datadog\_api\_secret\_key](#input\_datadog\_api\_secret\_key) | The name of the Datadog API secret | `string` | `"default"` | no | +| [datadog\_api\_secret\_key\_source\_pattern](#input\_datadog\_api\_secret\_key\_source\_pattern) | The format string (%v will be replaced by the var.datadog\_api\_secret\_key) for the key of the Datadog API secret in the source account | `string` | `"/datadog/%v/datadog_api_key"` | no | +| [datadog\_api\_secret\_key\_target\_pattern](#input\_datadog\_api\_secret\_key\_target\_pattern) | The format string (%v will be replaced by the var.datadog\_api\_secret\_key) for the key of the Datadog API secret in the target account | `string` | `"/datadog/datadog_api_key"` | no | +| [datadog\_app\_secret\_key](#input\_datadog\_app\_secret\_key) | The name of the Datadog APP secret | `string` | `"default"` | no | +| [datadog\_app\_secret\_key\_source\_pattern](#input\_datadog\_app\_secret\_key\_source\_pattern) | The format string (%v will be replaced by the var.datadog\_app\_secret\_key) for the key of the Datadog APP secret in the source account | `string` | `"/datadog/%v/datadog_app_key"` | no | +| [datadog\_app\_secret\_key\_target\_pattern](#input\_datadog\_app\_secret\_key\_target\_pattern) | The format string (%v will be replaced by the var.datadog\_api\_secret\_key) for the key of the Datadog APP secret in the target account | `string` | `"/datadog/datadog_app_key"` | no | +| [datadog\_secrets\_source\_store\_account\_region](#input\_datadog\_secrets\_source\_store\_account\_region) | Region for holding Secret Store Datadog Keys, leave as null to use the same region as the stack | `string` | `null` | no | +| [datadog\_secrets\_source\_store\_account\_stage](#input\_datadog\_secrets\_source\_store\_account\_stage) | Stage holding Secret Store for Datadog API and app keys. | `string` | `"auto"` | no | +| [datadog\_secrets\_source\_store\_account\_tenant](#input\_datadog\_secrets\_source\_store\_account\_tenant) | Tenant holding Secret Store for Datadog API and app keys. | `string` | `"core"` | no | +| [datadog\_secrets\_store\_type](#input\_datadog\_secrets\_store\_type) | Secret Store type for Datadog API and app keys. Valid values: `SSM`, `ASM` | `string` | `"SSM"` | no | +| [datadog\_site\_url](#input\_datadog\_site\_url) | The Datadog Site URL, https://docs.datadoghq.com/getting_started/site/ | `string` | `null` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to the `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [datadog\_api\_key\_location](#output\_datadog\_api\_key\_location) | The Datadog API key in the secrets store | +| [datadog\_api\_url](#output\_datadog\_api\_url) | The URL of the Datadog API | +| [datadog\_app\_key\_location](#output\_datadog\_app\_key\_location) | The Datadog APP key location in the secrets store | +| [datadog\_secrets\_store\_type](#output\_datadog\_secrets\_store\_type) | The type of the secrets store to use for Datadog API and APP keys | +| [datadog\_site](#output\_datadog\_site) | The Datadog site to use | +| [region](#output\_region) | The region where the keys will be created | + + + +## References + +- Datadog's [documentation about provisioning keys](https://docs.datadoghq.com/account_management/api-app-keys) +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/datadog-configuration) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/datadog-integration/asm.tf b/modules/datadog-configuration/asm.tf similarity index 100% rename from modules/datadog-integration/asm.tf rename to modules/datadog-configuration/asm.tf diff --git a/modules/datadog-configuration/context.tf b/modules/datadog-configuration/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/datadog-configuration/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/datadog-configuration/main.tf b/modules/datadog-configuration/main.tf new file mode 100644 index 000000000..e4dffa51a --- /dev/null +++ b/modules/datadog-configuration/main.tf @@ -0,0 +1,12 @@ +locals { + enabled = module.this.enabled + asm_enabled = local.enabled && var.datadog_secrets_store_type == "ASM" + ssm_enabled = local.enabled && var.datadog_secrets_store_type == "SSM" + + # https://docs.datadoghq.com/account_management/api-app-keys/ + datadog_api_key = local.asm_enabled ? data.aws_secretsmanager_secret_version.datadog_api_key[0].secret_string : local.ssm_enabled ? data.aws_ssm_parameter.datadog_api_key[0].value : "" + datadog_app_key = local.asm_enabled ? data.aws_secretsmanager_secret_version.datadog_app_key[0].secret_string : local.ssm_enabled ? 
data.aws_ssm_parameter.datadog_app_key[0].value : "" + + datadog_site = coalesce(var.datadog_site_url, "datadoghq.com") + datadog_api_url = format("https://api.%s", local.datadog_site) +} diff --git a/modules/datadog-configuration/modules/datadog_keys/README.md b/modules/datadog-configuration/modules/datadog_keys/README.md new file mode 100644 index 000000000..ea8c64aa5 --- /dev/null +++ b/modules/datadog-configuration/modules/datadog_keys/README.md @@ -0,0 +1,100 @@ +--- +tags: + - component/datadog_keys + - layer/datadog + - provider/datadog + - provider/aws +--- + +# Component: `datadog_keys` + +Useful submodule for other modules to quickly configure the datadog provider + +## Usage + +```hcl +module "datadog_configuration" { + source = "../datadog-configuration/modules/datadog_keys" + enabled = true + context = module.this.context +} + +provider "datadog" { + api_key = module.datadog_configuration.datadog_api_key + app_key = module.datadog_configuration.datadog_app_key + api_url = module.datadog_configuration.datadog_api_url + validate = local.enabled +} +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3 | +| [aws](#requirement\_aws) | >= 4.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws.dd\_api\_keys](#provider\_aws.dd\_api\_keys) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [always](#module\_always) | cloudposse/label/null | 0.25.0 | +| [datadog\_configuration](#module\_datadog\_configuration) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [utils\_example\_complete](#module\_utils\_example\_complete) | cloudposse/utils/aws | 1.3.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_ssm_parameter.datadog_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.datadog_app_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to the `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [global\_environment\_name](#input\_global\_environment\_name) | Global environment name | `string` | `"gbl"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [api\_key\_ssm\_arn](#output\_api\_key\_ssm\_arn) | Datadog API Key SSM ARN | +| [datadog\_api\_key](#output\_datadog\_api\_key) | Datadog API Key | +| [datadog\_api\_key\_location](#output\_datadog\_api\_key\_location) | The Datadog API key in the secrets store | +| [datadog\_api\_url](#output\_datadog\_api\_url) | Datadog API URL | +| [datadog\_app\_key](#output\_datadog\_app\_key) | Datadog APP Key | +| [datadog\_app\_key\_location](#output\_datadog\_app\_key\_location) | The Datadog APP key location in the secrets store | +| [datadog\_secrets\_store\_type](#output\_datadog\_secrets\_store\_type) | The type of the secrets store to use for Datadog API and APP keys | +| [datadog\_site](#output\_datadog\_site) | Datadog Site | +| [datadog\_tags](#output\_datadog\_tags) | The Context Tags in datadog tag format (list of strings formatted as 'key:value') | + + diff --git a/modules/datadog-configuration/modules/datadog_keys/context.tf b/modules/datadog-configuration/modules/datadog_keys/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/datadog-configuration/modules/datadog_keys/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/datadog-configuration/modules/datadog_keys/main.tf b/modules/datadog-configuration/modules/datadog_keys/main.tf new file mode 100644 index 000000000..7fcfcd54a --- /dev/null +++ b/modules/datadog-configuration/modules/datadog_keys/main.tf @@ -0,0 +1,52 @@ +module "always" { + source = "cloudposse/label/null" + version = "0.25.0" + + # datadog configuration must always be enabled, even for components that are disabled + # this allows datadog provider to be configured correctly and properly delete resources. + enabled = true + + context = module.this.context +} + +module "utils_example_complete" { + source = "cloudposse/utils/aws" + version = "1.3.0" +} + +locals { + context_tags = { + for k, v in module.this.tags : + lower(k) => v + } + dd_tags = [ + for k, v in local.context_tags : + v != null ? format("%s:%s", k, v) : k + ] +} + +module "datadog_configuration" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "datadog-configuration" + + environment = var.global_environment_name + context = module.always.context +} + +data "aws_ssm_parameter" "datadog_api_key" { + count = module.this.enabled ? 1 : 0 + + provider = aws.dd_api_keys + + name = module.datadog_configuration.outputs.datadog_api_key_location +} + +data "aws_ssm_parameter" "datadog_app_key" { + count = module.this.enabled ? 
1 : 0 + + provider = aws.dd_api_keys + + name = module.datadog_configuration.outputs.datadog_app_key_location +} diff --git a/modules/datadog-configuration/modules/datadog_keys/outputs.tf b/modules/datadog-configuration/modules/datadog_keys/outputs.tf new file mode 100644 index 000000000..d5a7d62a6 --- /dev/null +++ b/modules/datadog-configuration/modules/datadog_keys/outputs.tf @@ -0,0 +1,44 @@ +output "datadog_api_key" { + value = one(data.aws_ssm_parameter.datadog_api_key[*].value) + description = "Datadog API Key" +} + +output "datadog_app_key" { + value = one(data.aws_ssm_parameter.datadog_app_key[*].value) + description = "Datadog APP Key" +} + +output "datadog_api_url" { + value = module.datadog_configuration.outputs.datadog_api_url + description = "Datadog API URL" +} + +output "datadog_site" { + value = module.datadog_configuration.outputs.datadog_site + description = "Datadog Site" +} + +output "api_key_ssm_arn" { + value = one(data.aws_ssm_parameter.datadog_api_key[*].arn) + description = "Datadog API Key SSM ARN" +} + +output "datadog_secrets_store_type" { + value = module.datadog_configuration.outputs.datadog_secrets_store_type + description = "The type of the secrets store to use for Datadog API and APP keys" +} + +output "datadog_app_key_location" { + value = module.datadog_configuration.outputs.datadog_app_key_location + description = "The Datadog APP key location in the secrets store" +} + +output "datadog_api_key_location" { + value = module.datadog_configuration.outputs.datadog_api_key_location + description = "The Datadog API key in the secrets store" +} + +output "datadog_tags" { + value = local.dd_tags + description = "The Context Tags in datadog tag format (list of strings formatted as 'key:value')" +} diff --git a/modules/datadog-configuration/modules/datadog_keys/providers.tf b/modules/datadog-configuration/modules/datadog_keys/providers.tf new file mode 100644 index 000000000..f039376e1 --- /dev/null +++ b/modules/datadog-configuration/modules/datadog_keys/providers.tf @@ -0,0 +1,20 @@ +provider "aws" { + region = module.datadog_configuration.outputs.region + alias = "dd_api_keys" + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/datadog-configuration/modules/datadog_keys/variables.tf b/modules/datadog-configuration/modules/datadog_keys/variables.tf new file mode 100644 index 000000000..baa0ba33c --- /dev/null +++ b/modules/datadog-configuration/modules/datadog_keys/variables.tf @@ -0,0 +1,5 @@ +variable "global_environment_name" { + type = string + description = "Global environment name" + default = "gbl" +} diff --git a/modules/datadog-configuration/modules/datadog_keys/versions.tf b/modules/datadog-configuration/modules/datadog_keys/versions.tf new file mode 100644 index 000000000..fe97db94b --- /dev/null +++ b/modules/datadog-configuration/modules/datadog_keys/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.3" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + } +} diff --git a/modules/datadog-configuration/outputs.tf b/modules/datadog-configuration/outputs.tf new file mode 100644 index 000000000..5fc7c32a0 --- /dev/null +++ b/modules/datadog-configuration/outputs.tf @@ -0,0 +1,29 @@ +output "region" { + value = var.region + description = "The region where the keys will be created" +} + +output "datadog_secrets_store_type" { + value = var.datadog_secrets_store_type + description = "The type of the secrets store to use for Datadog API and APP keys" +} + +output "datadog_api_url" { + value = local.datadog_api_url + description = "The URL of the Datadog API" +} + +output "datadog_app_key_location" { + value = local.datadog_app_key_name + description = "The Datadog APP key location in the secrets store" +} + +output "datadog_api_key_location" { + value = local.datadog_api_key_name + description = "The Datadog API key in the secrets store" +} + +output "datadog_site" { + value = local.datadog_site + description = "The Datadog site to use" +} diff --git a/modules/datadog-configuration/provider-datadog.tf b/modules/datadog-configuration/provider-datadog.tf new file mode 100644 index 000000000..852b643f2 --- /dev/null +++ b/modules/datadog-configuration/provider-datadog.tf @@ -0,0 +1,23 @@ +# module.iam_roles_datadog_secrets.terraform_profile_name +provider "aws" { + alias = "api_keys" + region = coalesce(var.datadog_secrets_source_store_account_region, var.region) + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles_datadog_secrets.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles_datadog_secrets.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles_datadog_secrets.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles_datadog_secrets" { + source = "../account-map/modules/iam-roles" + stage = var.datadog_secrets_source_store_account_stage + tenant = var.datadog_secrets_source_store_account_tenant + context = module.this.context +} diff --git a/modules/datadog-configuration/providers.tf b/modules/datadog-configuration/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/datadog-configuration/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. 
When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/datadog-integration/ssm.tf b/modules/datadog-configuration/ssm.tf similarity index 91% rename from modules/datadog-integration/ssm.tf rename to modules/datadog-configuration/ssm.tf index eff496f6e..28e3ccc72 100644 --- a/modules/datadog-integration/ssm.tf +++ b/modules/datadog-configuration/ssm.tf @@ -32,14 +32,14 @@ module "store_write" { parameter_write = [ { name = local.datadog_api_key_name - value = data.aws_ssm_parameter.datadog_api_key[0].value + value = local.datadog_api_key type = "SecureString" overwrite = "true" description = "Datadog API key" }, { name = local.datadog_app_key_name - value = data.aws_ssm_parameter.datadog_app_key[0].value + value = local.datadog_app_key type = "SecureString" overwrite = "true" description = "Datadog APP key" diff --git a/modules/datadog-configuration/variables.tf b/modules/datadog-configuration/variables.tf new file mode 100644 index 000000000..2fa2752d5 --- /dev/null +++ b/modules/datadog-configuration/variables.tf @@ -0,0 +1,81 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "datadog_site_url" { + type = string + description = "The Datadog Site URL, https://docs.datadoghq.com/getting_started/site/" + default = null + + validation { + condition = var.datadog_site_url == null ? true : contains([ + "datadoghq.com", + "us3.datadoghq.com", + "us5.datadoghq.com", + "datadoghq.eu", + "ddog-gov.com" + ], var.datadog_site_url) + error_message = "Allowed values: null, `datadoghq.com`, `us3.datadoghq.com`, `us5.datadoghq.com`, `datadoghq.eu`, `ddog-gov.com`." + } +} + +variable "datadog_secrets_store_type" { + type = string + description = "Secret Store type for Datadog API and app keys. Valid values: `SSM`, `ASM`" + default = "SSM" +} + +variable "datadog_secrets_source_store_account_region" { + type = string + description = "Region for holding Secret Store Datadog Keys, leave as null to use the same region as the stack" + default = null +} + +variable "datadog_secrets_source_store_account_stage" { + type = string + description = "Stage holding Secret Store for Datadog API and app keys." + default = "auto" +} + +variable "datadog_secrets_source_store_account_tenant" { + type = string + description = "Tenant holding Secret Store for Datadog API and app keys." 
+ default = "core" +} + +variable "datadog_api_secret_key_source_pattern" { + type = string + description = "The format string (%v will be replaced by the var.datadog_api_secret_key) for the key of the Datadog API secret in the source account" + default = "/datadog/%v/datadog_api_key" +} + +variable "datadog_app_secret_key_source_pattern" { + type = string + description = "The format string (%v will be replaced by the var.datadog_app_secret_key) for the key of the Datadog APP secret in the source account" + default = "/datadog/%v/datadog_app_key" +} + +variable "datadog_api_secret_key_target_pattern" { + type = string + description = "The format string (%v will be replaced by the var.datadog_api_secret_key) for the key of the Datadog API secret in the target account" + default = "/datadog/datadog_api_key" +} + +variable "datadog_app_secret_key_target_pattern" { + type = string + description = "The format string (%v will be replaced by the var.datadog_app_secret_key) for the key of the Datadog APP secret in the target account" + default = "/datadog/datadog_app_key" +} + +variable "datadog_api_secret_key" { + type = string + description = "The name of the Datadog API secret" + default = "default" +} + +variable "datadog_app_secret_key" { + type = string + description = "The name of the Datadog APP secret" + default = "default" +} diff --git a/modules/datadog-configuration/versions.tf b/modules/datadog-configuration/versions.tf new file mode 100644 index 000000000..cc73ffd35 --- /dev/null +++ b/modules/datadog-configuration/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.9.0" + } + } +} diff --git a/modules/datadog-integration/CHANGELOG.md b/modules/datadog-integration/CHANGELOG.md new file mode 100644 index 000000000..a42d26323 --- /dev/null +++ b/modules/datadog-integration/CHANGELOG.md @@ -0,0 +1,21 @@ +## PR [#814](https://github.com/cloudposse/terraform-aws-components/pull/814) + +### Possible Breaking Change + +The `module "datadog_integration"` and `module "store_write"` had been changed in an earlier PR from a module without a +`count` to a module with a `count` of zero or one. This PR changes it back to a module without a count. If you were +using the module with a `count` of zero or one, applying this new version will cause it to be destroyed and recreated. This +should only cause a very brief outage in your Datadog monitoring (see the `moved` block sketch after this changelog for one way to avoid the recreate). + +### New Integration Options + +This PR adds the following new integration options: + +- `cspm_resource_collection_enabled` - Enable Datadog Cloud Security Posture Management scanning of your AWS account. + See [announcement](https://www.datadoghq.com/product/cloud-security-management/cloud-security-posture-management/) for + details. +- `metrics_collection_enabled` - When enabled, a metric-by-metric crawl of the CloudWatch API pulls data and sends it to + Datadog. New metrics are pulled every ten minutes, on average. +- `resource_collection_enabled` - Some Datadog products leverage information about how your AWS resources (such as S3 + Buckets, RDS snapshots, and CloudFront distributions) are configured. When `resource_collection_enabled` is `true`, + Datadog collects this information by making read-only API calls into your AWS account.
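If you maintain your own (vendored) copy of the `datadog-integration` component and want to avoid the brief destroy/recreate described in the changelog above, one option is to record the address change explicitly. The following is a minimal sketch only, not part of this PR: it assumes Terraform >= 1.1 (for `moved` blocks) and that the only change to the module addresses is the removal of the `[0]` index.

```hcl
# Hypothetical moved blocks, added to the datadog-integration component's root module.
# They tell Terraform that the previously indexed instances are now the un-indexed module calls,
# so the resources are moved in state instead of being destroyed and recreated.
moved {
  from = module.datadog_integration[0]
  to   = module.datadog_integration
}

moved {
  from = module.store_write[0]
  to   = module.store_write
}
```

Alternatively, running `terraform state mv 'module.datadog_integration[0]' 'module.datadog_integration'` (and the same for `module.store_write`) before applying achieves the same result without editing the component.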
diff --git a/modules/datadog-integration/README.md b/modules/datadog-integration/README.md index b47064d9b..e182591e3 100644 --- a/modules/datadog-integration/README.md +++ b/modules/datadog-integration/README.md @@ -1,17 +1,25 @@ -# Component: `datadog-integration` +--- +tags: + - component/datadog-integration + - layer/datadog + - provider/aws + - provider/datadog +--- -This component is responsible for provisioning Datadog AWS integrations. +# Component: `datadog-integration` -It's required that the DataDog API and APP secret keys are available in the `var.datadog_secrets_source_store_account` account -in AWS SSM Parameter Store at the `/datadog/%v/datadog_app_key` paths (where `%v` are the corresponding account names). +This component is responsible for provisioning Datadog AWS integrations. It depends on the `datadog-configuration` +component to get the Datadog API keys. -See Datadog's [documentation about provisioning keys](https://docs.datadoghq.com/account_management/api-app-keys) for more information. +See Datadog's [documentation about provisioning keys](https://docs.datadoghq.com/account_management/api-app-keys) for +more information. ## Usage **Stack Level**: Global -Here's an example snippet for how to use this component. It's suggested to apply this component to all accounts which you want to track AWS metrics with DataDog. +Here's an example snippet for how to use this component. It's suggested to apply this component to all accounts which +you want to track AWS metrics with DataDog. ```yaml components: @@ -21,48 +29,40 @@ components: spacelift: workspace_enabled: true vars: - datadog_secrets_store_type: SSM - datadog_secrets_source_store_account: "tools" - datadog_secrets_source_store_region: "us-west-2" - datadog_api_secret_key: "dev" - datadog_app_secret_key: "dev" + enabled: true ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | | [datadog](#requirement\_datadog) | >= 3.3.0 | ## Providers | Name | Version | |------|---------| -| [aws.api\_keys](#provider\_aws.api\_keys) | ~> 4.0 | +| [aws](#provider\_aws) | >= 4.9.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [datadog\_integration](#module\_datadog\_integration) | cloudposse/datadog-integration/aws | 0.18.0 | +| [datadog\_configuration](#module\_datadog\_configuration) | ../datadog-configuration/modules/datadog_keys | n/a | +| [datadog\_integration](#module\_datadog\_integration) | cloudposse/datadog-integration/aws | 1.2.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [iam\_roles\_datadog\_secrets](#module\_iam\_roles\_datadog\_secrets) | ../account-map/modules/iam-roles | n/a | -| [store\_write](#module\_store\_write) | cloudposse/ssm-parameter-store/aws | 0.10.0 | +| [store\_write](#module\_store\_write) | cloudposse/ssm-parameter-store/aws | 0.11.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources | Name | Type | |------|------| -| [aws_secretsmanager_secret.datadog_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret) | data source | -| [aws_secretsmanager_secret.datadog_app_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret) | data source | -| 
[aws_secretsmanager_secret_version.datadog_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret_version) | data source | -| [aws_secretsmanager_secret_version.datadog_app_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret_version) | data source | -| [aws_ssm_parameter.datadog_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | -| [aws_ssm_parameter.datadog_app_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_regions.all](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/regions) | data source | ## Inputs @@ -73,16 +73,8 @@ components: | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [context\_host\_and\_filter\_tags](#input\_context\_host\_and\_filter\_tags) | Automatically add host and filter tags for these context keys | `list(string)` |
[
"namespace",
"tenant",
"stage"
]
| no | -| [datadog\_api\_secret\_key](#input\_datadog\_api\_secret\_key) | The name of the Datadog API secret | `string` | `"default"` | no | -| [datadog\_api\_secret\_key\_source\_pattern](#input\_datadog\_api\_secret\_key\_source\_pattern) | The format string (%v will be replaced by the var.datadog\_api\_secret\_key) for the key of the Datadog API secret in the source account | `string` | `"/datadog/%v/datadog_api_key"` | no | -| [datadog\_api\_secret\_key\_target\_pattern](#input\_datadog\_api\_secret\_key\_target\_pattern) | The format string (%v will be replaced by the var.datadog\_api\_secret\_key) for the key of the Datadog API secret in the target account | `string` | `"/datadog/datadog_api_key"` | no | -| [datadog\_app\_secret\_key](#input\_datadog\_app\_secret\_key) | The name of the Datadog APP secret | `string` | `"default"` | no | -| [datadog\_app\_secret\_key\_source\_pattern](#input\_datadog\_app\_secret\_key\_source\_pattern) | The format string (%v will be replaced by the var.datadog\_app\_secret\_key) for the key of the Datadog APP secret in the source account | `string` | `"/datadog/%v/datadog_app_key"` | no | -| [datadog\_app\_secret\_key\_target\_pattern](#input\_datadog\_app\_secret\_key\_target\_pattern) | The format string (%v will be replaced by the var.datadog\_api\_secret\_key) for the key of the Datadog APP secret in the target account | `string` | `"/datadog/datadog_app_key"` | no | +| [cspm\_resource\_collection\_enabled](#input\_cspm\_resource\_collection\_enabled) | Enable Datadog Cloud Security Posture Management scanning of your AWS account.
See [announcement](https://www.datadoghq.com/product/cloud-security-management/cloud-security-posture-management/) for details. | `bool` | `null` | no | | [datadog\_aws\_account\_id](#input\_datadog\_aws\_account\_id) | The AWS account ID Datadog's integration servers use for all integrations | `string` | `"464622532012"` | no | -| [datadog\_secrets\_source\_store\_account\_stage](#input\_datadog\_secrets\_source\_store\_account\_stage) | Stage holding Secret Store for Datadog API and app keys. | `string` | `"tools"` | no | -| [datadog\_secrets\_source\_store\_account\_tenant](#input\_datadog\_secrets\_source\_store\_account\_tenant) | Tenant holding Secret Store for Datadog API and app keys. | `string` | `"core"` | no | -| [datadog\_secrets\_store\_type](#input\_datadog\_secrets\_store\_type) | Secret Store type for Datadog API and app keys. Valid values: `SSM`, `ASM` | `string` | `"SSM"` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | @@ -91,17 +83,18 @@ components: | [filter\_tags](#input\_filter\_tags) | An array of EC2 tags (in the form `key:value`) that defines a filter that Datadog use when collecting metrics from EC2. Wildcards, such as ? (for single characters) and * (for multiple characters) can also be used | `list(string)` | `[]` | no | | [host\_tags](#input\_host\_tags) | An array of tags (in the form `key:value`) to add to all hosts and metrics reporting through this integration | `list(string)` | `[]` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | +| [included\_regions](#input\_included\_regions) | An array of AWS regions to include in metrics collection | `list(string)` | `[]` | no | | [integrations](#input\_integrations) | List of AWS permission names to apply for different integrations (e.g. 'all', 'core') | `list(string)` |
[
"all"
]
| no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [metrics\_collection\_enabled](#input\_metrics\_collection\_enabled) | When enabled, a metric-by-metric crawl of the CloudWatch API pulls data and sends it
to Datadog. New metrics are pulled every ten minutes, on average. | `bool` | `null` | no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [resource\_collection\_enabled](#input\_resource\_collection\_enabled) | Some Datadog products leverage information about how your AWS resources
(such as S3 Buckets, RDS snapshots, and CloudFront distributions) are configured.
When `resource_collection_enabled` is `true`, Datadog collects this information
by making read-only API calls into your AWS account. | `bool` | `null` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | @@ -114,11 +107,12 @@ components: | [aws\_role\_name](#output\_aws\_role\_name) | Name of the AWS IAM Role for the Datadog integration | | [datadog\_external\_id](#output\_datadog\_external\_id) | Datadog integration external ID | - + ## References -* Datadog's [documentation about provisioning keys](https://docs.datadoghq.com/account_management/api-app-keys) -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/datadog-integration) - Cloud Posse's upstream component +- Datadog's [documentation about provisioning keys](https://docs.datadoghq.com/account_management/api-app-keys) +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/datadog-integration) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/datadog-integration/default.auto.tfvars b/modules/datadog-integration/default.auto.tfvars deleted file mode 100644 index 131af3ce0..000000000 --- a/modules/datadog-integration/default.auto.tfvars +++ /dev/null @@ -1,5 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false - - diff --git a/modules/datadog-integration/main.tf b/modules/datadog-integration/main.tf index 3d5f0ddb9..18ed5e90a 100644 --- a/modules/datadog-integration/main.tf +++ b/modules/datadog-integration/main.tf @@ -1,30 +1,65 @@ +locals { + use_include_regions = length(var.included_regions) > 0 + all_regions = data.aws_regions.all.names + excluded_list_by_include = setsubtract(local.use_include_regions ? local.all_regions : [], var.included_regions) +} + +data "aws_regions" "all" { + all_regions = true +} + module "datadog_integration" { source = "cloudposse/datadog-integration/aws" - version = "0.18.0" + version = "1.2.0" + + enabled = module.this.enabled && length(var.integrations) > 0 datadog_aws_account_id = var.datadog_aws_account_id integrations = var.integrations filter_tags = local.filter_tags host_tags = local.host_tags - excluded_regions = var.excluded_regions + excluded_regions = concat(var.excluded_regions, tolist(local.excluded_list_by_include)) account_specific_namespace_rules = var.account_specific_namespace_rules + cspm_resource_collection_enabled = var.cspm_resource_collection_enabled + metrics_collection_enabled = var.metrics_collection_enabled + resource_collection_enabled = var.resource_collection_enabled context = module.this.context } locals { - enabled = module.this.enabled - asm_enabled = local.enabled && var.datadog_secrets_store_type == "ASM" - ssm_enabled = local.enabled && var.datadog_secrets_store_type == "SSM" - - # https://docs.datadoghq.com/account_management/api-app-keys/ - datadog_api_key = local.enabled ? (local.asm_enabled ? data.aws_secretsmanager_secret_version.datadog_api_key[0].secret_string : data.aws_ssm_parameter.datadog_api_key[0].value) : null - datadog_app_key = local.enabled ? (local.asm_enabled ? data.aws_secretsmanager_secret_version.datadog_app_key[0].secret_string : data.aws_ssm_parameter.datadog_app_key[0].value) : null + enabled = module.this.enabled # Get the context tags and skip tags that we don't want applied to every resource. # i.e. 
we don't want name since each metric would be called something other than this component's name. # i.e. we don't want environment since each metric would come from gbl or a region and this component is deployed in gbl. - context_tags = [for k, v in module.this.tags : "${lower(k)}:${v}" if contains(var.context_host_and_filter_tags, lower(k))] - filter_tags = distinct(concat(var.filter_tags, local.context_tags)) - host_tags = distinct(concat(var.host_tags, local.context_tags)) + context_tags = [ + for k, v in module.this.tags : "${lower(k)}:${v}" if contains(var.context_host_and_filter_tags, lower(k)) + ] + filter_tags = distinct(concat(var.filter_tags, local.context_tags)) + host_tags = distinct(concat(var.host_tags, local.context_tags)) +} + +module "store_write" { + source = "cloudposse/ssm-parameter-store/aws" + version = "0.11.0" + + parameter_write = [ + { + name = "/datadog/datadog_external_id" + value = join("", module.datadog_integration[*].datadog_external_id) + type = "String" + overwrite = "true" + description = "External identifier for our dd integration" + }, + { + name = "/datadog/aws_role_name" + value = join("", module.datadog_integration[*].aws_role_name) + type = "String" + overwrite = "true" + description = "Name of the AWS IAM role used by our dd integration" + } + ] + + context = module.this.context } diff --git a/modules/datadog-integration/outputs.tf b/modules/datadog-integration/outputs.tf index 96cf1426d..2a801eee8 100644 --- a/modules/datadog-integration/outputs.tf +++ b/modules/datadog-integration/outputs.tf @@ -1,14 +1,14 @@ output "aws_account_id" { - value = module.datadog_integration.aws_account_id + value = one(module.datadog_integration[*].aws_account_id) description = "AWS Account ID of the IAM Role for the Datadog integration" } output "aws_role_name" { - value = module.datadog_integration.aws_role_name + value = one(module.datadog_integration[*].aws_role_name) description = "Name of the AWS IAM Role for the Datadog integration" } output "datadog_external_id" { - value = module.datadog_integration.datadog_external_id + value = one(module.datadog_integration[*].datadog_external_id) description = "Datadog integration external ID" } diff --git a/modules/datadog-integration/provider-datadog.tf b/modules/datadog-integration/provider-datadog.tf new file mode 100644 index 000000000..0b4e862f8 --- /dev/null +++ b/modules/datadog-integration/provider-datadog.tf @@ -0,0 +1,12 @@ +module "datadog_configuration" { + source = "../datadog-configuration/modules/datadog_keys" + enabled = true + context = module.this.context +} + +provider "datadog" { + api_key = module.datadog_configuration.datadog_api_key + app_key = module.datadog_configuration.datadog_app_key + api_url = module.datadog_configuration.datadog_api_url + validate = local.enabled +} diff --git a/modules/datadog-integration/providers.tf b/modules/datadog-integration/providers.tf index 613af8191..ef923e10a 100644 --- a/modules/datadog-integration/providers.tf +++ b/modules/datadog-integration/providers.tf @@ -1,54 +1,19 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null - dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? 
[] : ["role"] - content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) - } - } -} - -module "iam_roles" { - source = "../account-map/modules/iam-roles" - context = module.this.context -} + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} - -provider "aws" { - alias = "api_keys" - region = var.region - - profile = module.iam_roles_datadog_secrets.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles_datadog_secrets.terraform_profile_name) : null dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles_datadog_secrets.terraform_role_arn) + role_arn = assume_role.value } } } -module "iam_roles_datadog_secrets" { +module "iam_roles" { source = "../account-map/modules/iam-roles" - stage = var.datadog_secrets_source_store_account_stage - tenant = var.datadog_secrets_source_store_account_tenant context = module.this.context } - -provider "datadog" { - api_key = local.datadog_api_key - app_key = local.datadog_app_key - validate = local.enabled -} diff --git a/modules/datadog-integration/variables.tf b/modules/datadog-integration/variables.tf index e32abcfb8..8df990f95 100644 --- a/modules/datadog-integration/variables.tf +++ b/modules/datadog-integration/variables.tf @@ -33,68 +33,48 @@ variable "excluded_regions" { default = [] } +variable "included_regions" { + type = list(string) + description = "An array of AWS regions to include in metrics collection" + default = [] +} variable "account_specific_namespace_rules" { type = map(string) description = "An object, (in the form {\"namespace1\":true/false, \"namespace2\":true/false} ), that enables or disables metric collection for specific AWS namespaces for this AWS account only" default = {} } -variable "datadog_secrets_store_type" { - type = string - description = "Secret Store type for Datadog API and app keys. Valid values: `SSM`, `ASM`" - default = "SSM" -} - -variable "datadog_secrets_source_store_account_stage" { - type = string - description = "Stage holding Secret Store for Datadog API and app keys." - default = "tools" -} - -variable "datadog_secrets_source_store_account_tenant" { - type = string - description = "Tenant holding Secret Store for Datadog API and app keys." 
- default = "core" -} - -variable "datadog_api_secret_key_source_pattern" { - type = string - description = "The format string (%v will be replaced by the var.datadog_api_secret_key) for the key of the Datadog API secret in the source account" - default = "/datadog/%v/datadog_api_key" -} - -variable "datadog_app_secret_key_source_pattern" { - type = string - description = "The format string (%v will be replaced by the var.datadog_app_secret_key) for the key of the Datadog APP secret in the source account" - default = "/datadog/%v/datadog_app_key" +variable "context_host_and_filter_tags" { + type = list(string) + description = "Automatically add host and filter tags for these context keys" + default = ["namespace", "tenant", "stage"] } -variable "datadog_api_secret_key_target_pattern" { - type = string - description = "The format string (%v will be replaced by the var.datadog_api_secret_key) for the key of the Datadog API secret in the target account" - default = "/datadog/datadog_api_key" +variable "cspm_resource_collection_enabled" { + type = bool + default = null + description = <<-EOT + Enable Datadog Cloud Security Posture Management scanning of your AWS account. + See [announcement](https://www.datadoghq.com/product/cloud-security-management/cloud-security-posture-management/) for details. + EOT } -variable "datadog_app_secret_key_target_pattern" { - type = string - description = "The format string (%v will be replaced by the var.datadog_api_secret_key) for the key of the Datadog APP secret in the target account" - default = "/datadog/datadog_app_key" +variable "metrics_collection_enabled" { + type = bool + default = null + description = <<-EOT + When enabled, a metric-by-metric crawl of the CloudWatch API pulls data and sends it + to Datadog. New metrics are pulled every ten minutes, on average. + EOT } -variable "datadog_api_secret_key" { - type = string - description = "The name of the Datadog API secret" - default = "default" -} - -variable "datadog_app_secret_key" { - type = string - description = "The name of the Datadog APP secret" - default = "default" -} -# -variable "context_host_and_filter_tags" { - type = list(string) - description = "Automatically add host and filter tags for these context keys" - default = ["namespace", "tenant", "stage"] +variable "resource_collection_enabled" { + type = bool + default = null + description = <<-EOT + Some Datadog products leverage information about how your AWS resources + (such as S3 Buckets, RDS snapshots, and CloudFront distributions) are configured. + When `resource_collection_enabled` is `true`, Datadog collects this information + by making read-only API calls into your AWS account. 
+ EOT } diff --git a/modules/datadog-integration/versions.tf b/modules/datadog-integration/versions.tf index 9b8e48942..20f566652 100644 --- a/modules/datadog-integration/versions.tf +++ b/modules/datadog-integration/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.9.0" } datadog = { source = "datadog/datadog" diff --git a/modules/datadog-lambda-forwarder/CHANGELOG.md b/modules/datadog-lambda-forwarder/CHANGELOG.md new file mode 100644 index 000000000..478db1e47 --- /dev/null +++ b/modules/datadog-lambda-forwarder/CHANGELOG.md @@ -0,0 +1,11 @@ +## PR [#814](https://github.com/cloudposse/terraform-aws-components/pull/814) + +### Fix for `enabled = false` or Destroy and Recreate + +Previously, when `enabled = false` was set, the component would not necessarily function as desired (deleting any +existing resources and not creating any new ones). Also, previously, when deleting the component, there was a race +condition where the log group could be deleted before the lambda function was deleted, causing the lambda function to +trigger automatic recreation of the log group. This would result in re-creation failing because Terraform would try to +create the log group but it already existed. + +These issues have been fixed in this PR. diff --git a/modules/datadog-lambda-forwarder/README.md b/modules/datadog-lambda-forwarder/README.md index 33a2e75db..75251a11f 100644 --- a/modules/datadog-lambda-forwarder/README.md +++ b/modules/datadog-lambda-forwarder/README.md @@ -1,8 +1,16 @@ -# Component: `datadog-lambda-forwarder` +--- +tags: + - component/datadog-lambda-forwarder + - layer/datadog + - provider/aws + - provider/datadog +--- -This component is responsible for provision all the necessary infrastructure to -deploy [Datadog Lambda forwarders](https://github.com/DataDog/datadog-serverless-functions/tree/master/aws/logs_monitoring). +# Component: `datadog-lambda-forwarder` +This component is responsible for provision all the necessary infrastructure to deploy +[Datadog Lambda forwarders](https://github.com/DataDog/datadog-serverless-functions/tree/master/aws/logs_monitoring). It +depends on the `datadog-configuration` component to get the Datadog API keys. ## Usage @@ -26,14 +34,14 @@ components: # 3. CloudWatch Log Group `RDSOSMetrics` exists (it will be created by AWS automatically when RDS Enhanced Monitoring is enabled) forwarder_rds_enabled: true forwarder_log_enabled: true - forwarder_vpc_enabled: true + forwarder_vpc_logs_enabled: true cloudwatch_forwarder_log_groups: rds-enhanced-monitoring: name: "RDSOSMetrics" filter_pattern: "" eks-cluster: # Use either `name` or `name_prefix` with `name_suffix` - # If `name_prefix` with `name_suffix` are used, the final `name` will be constructed using `name_prefix` + context + `name_suffix`, + # If `name_prefix` with `name_suffix` are used, the final `name` will be constructed using `name_prefix` + context + `name_suffix`, # e.g. 
"/aws/eks/eg-ue2-prod-eks-cluster/cluster" name_prefix: "/aws/eks/" name_suffix: "eks-cluster/cluster" @@ -41,35 +49,43 @@ components: transfer-sftp: name: "/aws/transfer/s-xxxxxxxxxxxx" filter_pattern: "" - dd_api_key_source: - resource: "ssm" - identifier: "datadog/datadog_api_key" ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | >= 3.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [datadog](#requirement\_datadog) | >= 3.3.0 | ## Providers -No providers. +| Name | Version | +|------|---------| +| [datadog](#provider\_datadog) | >= 3.3.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [datadog\_lambda\_forwarder](#module\_datadog\_lambda\_forwarder) | cloudposse/datadog-lambda-forwarder/aws | 0.12.0 | +| [datadog-integration](#module\_datadog-integration) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [datadog\_configuration](#module\_datadog\_configuration) | ../datadog-configuration/modules/datadog_keys | n/a | +| [datadog\_lambda\_forwarder](#module\_datadog\_lambda\_forwarder) | cloudposse/datadog-lambda-forwarder/aws | 1.5.3 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [log\_group\_prefix](#module\_log\_group\_prefix) | cloudposse/label/null | 0.25.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources -No resources. +| Name | Type | +|------|------| +| [datadog_integration_aws_lambda_arn.log_collector](https://registry.terraform.io/providers/datadog/datadog/latest/docs/resources/integration_aws_lambda_arn) | resource | +| [datadog_integration_aws_lambda_arn.rds_collector](https://registry.terraform.io/providers/datadog/datadog/latest/docs/resources/integration_aws_lambda_arn) | resource | +| [datadog_integration_aws_lambda_arn.vpc_logs_collector](https://registry.terraform.io/providers/datadog/datadog/latest/docs/resources/integration_aws_lambda_arn) | resource | +| [datadog_integration_aws_log_collection.main](https://registry.terraform.io/providers/datadog/datadog/latest/docs/resources/integration_aws_log_collection) | resource | ## Inputs @@ -77,21 +93,21 @@ No resources. |------|-------------|------|---------|:--------:| | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [cloudwatch\_forwarder\_event\_patterns](#input\_cloudwatch\_forwarder\_event\_patterns) | Map of title to CloudWatch Event patterns to forward to Datadog. Event structure from here: https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/CloudWatchEventsandEventPatterns.html#CloudWatchEventsPatterns
Example:
hcl
cloudwatch_forwarder_event_patterns = {
"guardduty" = {
source = ["aws.guardduty"]
detail-type = ["GuardDuty Finding"]
}
"ec2-terminated" = {
source = ["aws.ec2"]
detail-type = ["EC2 Instance State-change Notification"]
detail = {
state = ["terminated"]
}
}
}
|
map(object({
version = optional(list(string))
id = optional(list(string))
detail-type = optional(list(string))
source = optional(list(string))
account = optional(list(string))
time = optional(list(string))
region = optional(list(string))
resources = optional(list(string))
detail = optional(map(list(string)))
}))
| `{}` | no | | [cloudwatch\_forwarder\_log\_groups](#input\_cloudwatch\_forwarder\_log\_groups) | Map of CloudWatch Log Groups with a filter pattern that the Lambda forwarder will send logs from. For example: { mysql1 = { name = "/aws/rds/maincluster", filter\_pattern = "" } | `map(map(string))` | `{}` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [context\_tags](#input\_context\_tags) | List of context tags to add to each monitor | `set(string)` |
[
"namespace",
"tenant",
"environment",
"stage"
]
| no | | [context\_tags\_enabled](#input\_context\_tags\_enabled) | Whether to add context tags to add to each monitor | `bool` | `true` | no | +| [datadog\_forwarder\_lambda\_environment\_variables](#input\_datadog\_forwarder\_lambda\_environment\_variables) | Map of environment variables to pass to the Lambda Function | `map(string)` | `{}` | no | | [dd\_api\_key\_kms\_ciphertext\_blob](#input\_dd\_api\_key\_kms\_ciphertext\_blob) | CiphertextBlob stored in environment variable DD\_KMS\_API\_KEY used by the lambda function, along with the KMS key, to decrypt Datadog API key | `string` | `""` | no | -| [dd\_api\_key\_source](#input\_dd\_api\_key\_source) | One of: ARN for AWS Secrets Manager (asm) to retrieve the Datadog (DD) api key, ARN for the KMS (kms) key used to decrypt the ciphertext\_blob of the api key, or the name of the SSM (ssm) parameter used to retrieve the Datadog API key |
object({
resource = string
identifier = string
})
|
{
"identifier": "",
"resource": ""
}
| no | | [dd\_artifact\_filename](#input\_dd\_artifact\_filename) | The Datadog artifact filename minus extension | `string` | `"aws-dd-forwarder"` | no | -| [dd\_forwarder\_version](#input\_dd\_forwarder\_version) | Version tag of Datadog lambdas to use. https://github.com/DataDog/datadog-serverless-functions/releases | `string` | `"3.40.0"` | no | +| [dd\_forwarder\_version](#input\_dd\_forwarder\_version) | Version tag of Datadog lambdas to use. https://github.com/DataDog/datadog-serverless-functions/releases | `string` | `"3.66.0"` | no | | [dd\_module\_name](#input\_dd\_module\_name) | The Datadog GitHub repository name | `string` | `"datadog-serverless-functions"` | no | | [dd\_tags\_map](#input\_dd\_tags\_map) | A map of Datadog tags to apply to all logs forwarded to Datadog | `map(string)` | `{}` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [forwarder\_lambda\_datadog\_host](#input\_forwarder\_lambda\_datadog\_host) | Datadog Site to send data to. Possible values are `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com` and `ddog-gov.com` | `string` | `"datadoghq.com"` | no | | [forwarder\_lambda\_debug\_enabled](#input\_forwarder\_lambda\_debug\_enabled) | Whether to enable or disable debug for the Lambda forwarder | `bool` | `false` | no | | [forwarder\_log\_artifact\_url](#input\_forwarder\_log\_artifact\_url) | The URL for the code of the Datadog forwarder for Logs. It can be a local file, URL or git repo | `string` | `null` | no | | [forwarder\_log\_enabled](#input\_forwarder\_log\_enabled) | Flag to enable or disable Datadog log forwarder | `bool` | `false` | no | @@ -106,22 +122,23 @@ No resources. | [forwarder\_vpc\_logs\_layers](#input\_forwarder\_vpc\_logs\_layers) | List of Lambda Layer Version ARNs (maximum of 5) to attach to Datadog VPC flow log forwarder lambda function | `list(string)` | `[]` | no | | [forwarder\_vpclogs\_filter\_pattern](#input\_forwarder\_vpclogs\_filter\_pattern) | Filter pattern for Lambda forwarder VPC Logs | `string` | `""` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kms\_key\_id](#input\_kms\_key\_id) | Optional KMS key ID to encrypt Datadog Lambda function logs | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [lambda\_arn\_enabled](#input\_lambda\_arn\_enabled) | Enable adding the Lambda Arn to this account integration | `bool` | `true` | no | | [lambda\_policy\_source\_json](#input\_lambda\_policy\_source\_json) | Additional IAM policy document that can optionally be passed and merged with the created policy document | `string` | `""` | no | | [lambda\_reserved\_concurrent\_executions](#input\_lambda\_reserved\_concurrent\_executions) | Amount of reserved concurrent executions for the lambda function. A value of 0 disables Lambda from being triggered and -1 removes any concurrency limitations. Defaults to Unreserved Concurrency Limits -1 | `number` | `-1` | no | -| [lambda\_runtime](#input\_lambda\_runtime) | Runtime environment for Datadog Lambda | `string` | `"python3.7"` | no | +| [lambda\_runtime](#input\_lambda\_runtime) | Runtime environment for Datadog Lambda | `string` | `"python3.8"` | no | +| [log\_collection\_services](#input\_log\_collection\_services) | List of log collection services to enable | `list(string)` |
[
"apigw-access-logs",
"apigw-execution-logs",
"elbv2",
"elb",
"cloudfront",
"lambda",
"redshift",
"s3"
]
| no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | | [s3\_bucket\_kms\_arns](#input\_s3\_bucket\_kms\_arns) | List of KMS key ARNs for s3 bucket encryption | `list(string)` | `[]` | no | -| [s3\_buckets](#input\_s3\_buckets) | The names and ARNs of S3 buckets to forward logs to Datadog | `list(string)` | `null` | no | +| [s3\_buckets](#input\_s3\_buckets) | The names of S3 buckets to forward logs to Datadog | `list(string)` | `[]` | no | +| [s3\_buckets\_with\_prefixes](#input\_s3\_buckets\_with\_prefixes) | The names S3 buckets and prefix to forward logs to Datadog | `map(object({ bucket_name : string, bucket_prefix : string }))` | `{}` | no | | [security\_group\_ids](#input\_security\_group\_ids) | List of security group IDs to use when the Lambda Function runs in a VPC | `list(string)` | `null` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [subnet\_ids](#input\_subnet\_ids) | List of subnet IDs to use when deploying the Lambda Function in a VPC | `list(string)` | `null` | no | @@ -141,11 +158,12 @@ No resources. | [lambda\_forwarder\_vpc\_log\_function\_arn](#output\_lambda\_forwarder\_vpc\_log\_function\_arn) | Datadog Lambda forwarder VPC Flow Logs function ARN | | [lambda\_forwarder\_vpc\_log\_function\_name](#output\_lambda\_forwarder\_vpc\_log\_function\_name) | Datadog Lambda forwarder VPC Flow Logs function name | - + ## References -* Datadog's [documentation about provisioning keys](https://docs.datadoghq.com/account_management/api-app-keys -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/datadog-lambda-forwarder) - Cloud Posse's upstream component +- Datadog's [documentation about provisioning keys](https://docs.datadoghq.com/account_management/api-app-keys) +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/datadog-lambda-forwarder) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/datadog-lambda-forwarder/main.tf b/modules/datadog-lambda-forwarder/main.tf index dd0157fb0..62628c666 100644 --- a/modules/datadog-lambda-forwarder/main.tf +++ b/modules/datadog-lambda-forwarder/main.tf @@ -1,4 +1,7 @@ locals { + enabled = module.this.enabled + lambda_arn_enabled = local.enabled && var.lambda_arn_enabled + # If any keys contain name_suffix, then use a null label to get the label prefix, and create # the appropriate input for the upstream module. 
cloudwatch_forwarder_log_groups = { @@ -38,16 +41,20 @@ module "log_group_prefix" { module "datadog_lambda_forwarder" { source = "cloudposse/datadog-lambda-forwarder/aws" - version = "0.12.0" + version = "1.5.3" - cloudwatch_forwarder_log_groups = local.cloudwatch_forwarder_log_groups - dd_api_key_kms_ciphertext_blob = var.dd_api_key_kms_ciphertext_blob - dd_api_key_source = var.dd_api_key_source + cloudwatch_forwarder_log_groups = local.cloudwatch_forwarder_log_groups + cloudwatch_forwarder_event_patterns = var.cloudwatch_forwarder_event_patterns + dd_api_key_kms_ciphertext_blob = var.dd_api_key_kms_ciphertext_blob + dd_api_key_source = { + resource = lower(module.datadog_configuration.datadog_secrets_store_type) + identifier = module.datadog_configuration.datadog_api_key_location + } dd_artifact_filename = var.dd_artifact_filename dd_forwarder_version = var.dd_forwarder_version dd_module_name = var.dd_module_name dd_tags_map = local.dd_tags_map - forwarder_lambda_datadog_host = var.forwarder_lambda_datadog_host + forwarder_lambda_datadog_host = module.datadog_configuration.datadog_site forwarder_lambda_debug_enabled = var.forwarder_lambda_debug_enabled forwarder_log_artifact_url = var.forwarder_log_artifact_url forwarder_log_enabled = var.forwarder_log_enabled @@ -67,10 +74,45 @@ module "datadog_lambda_forwarder" { lambda_runtime = var.lambda_runtime s3_bucket_kms_arns = var.s3_bucket_kms_arns s3_buckets = var.s3_buckets + s3_buckets_with_prefixes = var.s3_buckets_with_prefixes security_group_ids = var.security_group_ids subnet_ids = var.subnet_ids tracing_config_mode = var.tracing_config_mode vpclogs_cloudwatch_log_group = var.vpclogs_cloudwatch_log_group + datadog_forwarder_lambda_environment_variables = var.datadog_forwarder_lambda_environment_variables + + api_key_ssm_arn = module.datadog_configuration.api_key_ssm_arn + context = module.this.context } + +# Create a new Datadog - Amazon Web Services integration Lambda ARN +resource "datadog_integration_aws_lambda_arn" "rds_collector" { + count = local.lambda_arn_enabled && var.forwarder_rds_enabled ? 1 : 0 + + account_id = module.datadog-integration.outputs.aws_account_id + lambda_arn = module.datadog_lambda_forwarder.lambda_forwarder_rds_function_arn +} + +resource "datadog_integration_aws_lambda_arn" "vpc_logs_collector" { + count = local.lambda_arn_enabled && var.forwarder_vpc_logs_enabled ? 1 : 0 + + account_id = module.datadog-integration.outputs.aws_account_id + lambda_arn = module.datadog_lambda_forwarder.lambda_forwarder_vpc_log_function_arn +} + +resource "datadog_integration_aws_lambda_arn" "log_collector" { + count = local.lambda_arn_enabled && var.forwarder_log_enabled ? 1 : 0 + + account_id = module.datadog-integration.outputs.aws_account_id + lambda_arn = module.datadog_lambda_forwarder.lambda_forwarder_log_function_arn +} + +resource "datadog_integration_aws_log_collection" "main" { + count = local.lambda_arn_enabled ? 
1 : 0 + account_id = module.datadog-integration.outputs.aws_account_id + services = var.log_collection_services + + depends_on = [module.datadog_lambda_forwarder] +} diff --git a/modules/datadog-lambda-forwarder/provider-datadog.tf b/modules/datadog-lambda-forwarder/provider-datadog.tf new file mode 100644 index 000000000..0b4e862f8 --- /dev/null +++ b/modules/datadog-lambda-forwarder/provider-datadog.tf @@ -0,0 +1,12 @@ +module "datadog_configuration" { + source = "../datadog-configuration/modules/datadog_keys" + enabled = true + context = module.this.context +} + +provider "datadog" { + api_key = module.datadog_configuration.datadog_api_key + app_key = module.datadog_configuration.datadog_app_key + api_url = module.datadog_configuration.datadog_api_url + validate = local.enabled +} diff --git a/modules/datadog-lambda-forwarder/providers.tf b/modules/datadog-lambda-forwarder/providers.tf index efa9ede5d..ef923e10a 100644 --- a/modules/datadog-lambda-forwarder/providers.tf +++ b/modules/datadog-lambda-forwarder/providers.tf @@ -1,11 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -14,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/datadog-lambda-forwarder/remote-state.tf b/modules/datadog-lambda-forwarder/remote-state.tf new file mode 100644 index 000000000..da85c90da --- /dev/null +++ b/modules/datadog-lambda-forwarder/remote-state.tf @@ -0,0 +1,9 @@ +module "datadog-integration" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "datadog-integration" + + environment = module.iam_roles.global_environment_name + context = module.this.context +} diff --git a/modules/datadog-lambda-forwarder/variables.tf b/modules/datadog-lambda-forwarder/variables.tf index cb4748f28..69d8410e2 100644 --- a/modules/datadog-lambda-forwarder/variables.tf +++ b/modules/datadog-lambda-forwarder/variables.tf @@ -25,7 +25,7 @@ variable "lambda_reserved_concurrent_executions" { variable "lambda_runtime" { type = string description = "Runtime environment for Datadog Lambda" - default = "python3.7" + default = "python3.8" } variable "tracing_config_mode" { @@ -34,43 +34,6 @@ variable "tracing_config_mode" { default = "PassThrough" } -variable "dd_api_key_source" { - description = "One of: ARN for AWS Secrets Manager (asm) to retrieve the Datadog (DD) api key, ARN for the KMS (kms) key used to decrypt the ciphertext_blob of the api key, or the name of the SSM (ssm) parameter used to retrieve the Datadog API key" - type = object({ - resource = string - identifier = 
string - }) - - default = { - resource = "" - identifier = "" - } - - # Resource can be one of kms, asm, ssm ("" to disable all lambda resources) - validation { - condition = can(regex("(kms|asm|ssm)", var.dd_api_key_source.resource)) || var.dd_api_key_source.resource == "" - error_message = "Provide one, and only one, ARN for (kms, asm) or name (ssm) to retrieve or decrypt Datadog api key." - } - - # Check KMS ARN format - validation { - condition = var.dd_api_key_source.resource == "kms" ? can(regex("arn:.*:kms:.*:key/.*", var.dd_api_key_source.identifier)) : true - error_message = "ARN for KMS key does not appear to be valid format (example: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab)." - } - - # Check ASM ARN format - validation { - condition = var.dd_api_key_source.resource == "asm" ? can(regex("arn:.*:secretsmanager:.*:secret:.*", var.dd_api_key_source.identifier)) : true - error_message = "ARN for AWS Secrets Manager (asm) does not appear to be valid format (example: arn:aws:secretsmanager:us-west-2:111122223333:secret:aes128-1a2b3c)." - } - - # Check SSM name format - validation { - condition = var.dd_api_key_source.resource == "ssm" ? can(regex("^[a-zA-Z0-9_./-]+$", var.dd_api_key_source.identifier)) : true - error_message = "Name for SSM parameter does not appear to be valid format, acceptable characters are `a-zA-Z0-9_.-` and `/` to delineate hierarchies." - } -} - variable "dd_api_key_kms_ciphertext_blob" { type = string description = "CiphertextBlob stored in environment variable DD_KMS_API_KEY used by the lambda function, along with the KMS key, to decrypt Datadog API key" @@ -92,7 +55,7 @@ variable "dd_module_name" { variable "dd_forwarder_version" { type = string description = "Version tag of Datadog lambdas to use. https://github.com/DataDog/datadog-serverless-functions/releases" - default = "3.40.0" + default = "3.66.0" } variable "forwarder_log_enabled" { @@ -127,8 +90,14 @@ variable "kms_key_id" { variable "s3_buckets" { type = list(string) - description = "The names and ARNs of S3 buckets to forward logs to Datadog" - default = null + description = "The names of S3 buckets to forward logs to Datadog" + default = [] +} + +variable "s3_buckets_with_prefixes" { + type = map(object({ bucket_name : string, bucket_prefix : string })) + description = "The names S3 buckets and prefix to forward logs to Datadog" + default = {} } variable "s3_bucket_kms_arns" { @@ -145,6 +114,40 @@ variable "cloudwatch_forwarder_log_groups" { default = {} } +variable "cloudwatch_forwarder_event_patterns" { + type = map(object({ + version = optional(list(string)) + id = optional(list(string)) + detail-type = optional(list(string)) + source = optional(list(string)) + account = optional(list(string)) + time = optional(list(string)) + region = optional(list(string)) + resources = optional(list(string)) + detail = optional(map(list(string))) + })) + description = <<-EOF + Map of title to CloudWatch Event patterns to forward to Datadog. 
Event structure from here: https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/CloudWatchEventsandEventPatterns.html#CloudWatchEventsPatterns + Example: + ```hcl + cloudwatch_forwarder_event_rules = { + "guardduty" = { + source = ["aws.guardduty"] + detail-type = ["GuardDuty Finding"] + } + "ec2-terminated" = { + source = ["aws.ec2"] + detail-type = ["EC2 Instance State-change Notification"] + detail = { + state = ["terminated"] + } + } + } + ``` + EOF + default = {} +} + variable "forwarder_lambda_debug_enabled" { type = bool description = "Whether to enable or disable debug for the Lambda forwarder" @@ -181,16 +184,6 @@ variable "lambda_policy_source_json" { default = "" } -variable "forwarder_lambda_datadog_host" { - type = string - description = "Datadog Site to send data to. Possible values are `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com` and `ddog-gov.com`" - default = "datadoghq.com" - validation { - condition = contains(["datadoghq.com", "datadoghq.eu", "us3.datadoghq.com", "ddog-gov.com"], var.forwarder_lambda_datadog_host) - error_message = "Invalid host: possible values are `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com` and `ddog-gov.com`." - } -} - variable "forwarder_log_layers" { type = list(string) description = "List of Lambda Layer Version ARNs (maximum of 5) to attach to Datadog log forwarder lambda function" @@ -238,3 +231,37 @@ variable "context_tags" { description = "List of context tags to add to each monitor" default = ["namespace", "tenant", "environment", "stage"] } + +variable "lambda_arn_enabled" { + type = bool + description = "Enable adding the Lambda Arn to this account integration" + default = true +} + +# No Datasource for this (yet?) +/** +curl -X GET "${DD_API_URL}/api/v1/integration/aws/logs/services" \ +-H "Accept: application/json" \ +-H "DD-API-KEY: ${DD_API_KEY}" \ +-H "DD-APPLICATION-KEY: ${DD_APP_KEY}" | jq '.[] | .id' +**/ +variable "log_collection_services" { + type = list(string) + description = "List of log collection services to enable" + default = [ + "apigw-access-logs", + "apigw-execution-logs", + "elbv2", + "elb", + "cloudfront", + "lambda", + "redshift", + "s3" + ] +} + +variable "datadog_forwarder_lambda_environment_variables" { + type = map(string) + default = {} + description = "Map of environment variables to pass to the Lambda Function" +} diff --git a/modules/datadog-lambda-forwarder/versions.tf b/modules/datadog-lambda-forwarder/versions.tf index d5cde7755..f636a1364 100644 --- a/modules/datadog-lambda-forwarder/versions.tf +++ b/modules/datadog-lambda-forwarder/versions.tf @@ -4,7 +4,11 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.0" + version = ">= 4.0" + } + datadog = { + source = "datadog/datadog" + version = ">= 3.3.0" } } } diff --git a/modules/datadog-logs-archive/README.md b/modules/datadog-logs-archive/README.md new file mode 100644 index 000000000..6b0a3b482 --- /dev/null +++ b/modules/datadog-logs-archive/README.md @@ -0,0 +1,142 @@ +--- +tags: + - component/datadog-logs-archive + - layer/datadog + - provider/aws + - provider/datadog +--- + +# Component: `datadog-logs-archive` + +This component is responsible for provisioning Datadog Log Archives. It creates a single log archive pipeline for each +AWS account. If the `catchall` flag is set, it creates a catchall archive within the same S3 bucket. 
+ +Each log archive filters for the tag `env:$env` where $env is the environment/account name (ie sbx, prd, tools, etc), as +well as any tags identified in the additional_tags key. The `catchall` archive, as the name implies, filters for '\*'. + +A second bucket is created for cloudtrail, and a cloudtrail is configured to monitor the log archive bucket and log +activity to the cloudtrail bucket. To forward these cloudtrail logs to datadog, the cloudtrail bucket's id must be added +to the s3_buckets key for our datadog-lambda-forwarder component. + +Both buckets support object lock, with overridable defaults of COMPLIANCE mode with a duration of 7 days. + +## Prerequisites + +- Datadog integration set up in target environment + - We rely on the datadog api and app keys added by our datadog integration component + +## Issues, Gotchas, Good-to-Knows + +### Destroy/reprovision process + +Because of the protections for S3 buckets, if we want to destroy/replace our bucket, we need to do so in two passes or +destroy the bucket manually and then use terraform to clean up the rest. If reprovisioning a recently provisioned +bucket, the two-pass process works well. If the bucket has a full day or more of logs, though, deleting it manually +first will avoid terraform timeouts, and then the terraform process can be used to clean up everything else. + +#### Two step process to destroy via terraform + +- first set `s3_force_destroy` var to true and apply +- next set `enabled` to false and apply or use tf destroy + +## Usage + +**Stack Level**: Global + +Here's an example snippet for how to use this component. It's suggested to apply this component to all accounts from +which Datadog receives logs. + +```yaml +components: + terraform: + datadog-logs-archive: + settings: + spacelift: + workspace_enabled: true + vars: + enabled: true + # additional_query_tags: + # - "forwardername:*-dev-datadog-lambda-forwarder-logs" + # - "account:123456789012" +``` + +## Requirements + +| Name | Version | +| --------- | --------- | +| terraform | >= 0.13.0 | +| aws | >= 2.0 | +| datadog | >= 3.3.0 | +| local | >= 1.3 | + +## Providers + +| Name | Version | +| ------- | -------- | +| aws | >= 2.0 | +| datadog | >= 3.7.0 | +| http | >= 2.1.0 | + +## Modules + +| Name | Source | Version | +| -------------------- | ----------------------------------- | ------- | +| cloudtrail | cloudposse/cloudtrail/aws | 0.21.0 | +| cloudtrail_s3_bucket | cloudposse/cloudtrail-s3-bucket/aws | 0.23.1 | +| iam_roles | ../account-map/modules/iam-roles | n/a | +| s3_bucket | cloudposse/s3-bucket/aws | 0.46.0 | +| this | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +| --------------------------------------- | ----------- | +| aws_caller_identity.current | data source | +| aws_partition.current | data source | +| aws_ssm_parameter.datadog_api_key | data source | +| aws_ssm_parameter.datadog_app_key | data source | +| aws_ssm_parameter.datadog_aws_role_name | data source | +| aws_ssm_parameter.datadog_external_id | data source | +| datadog_logs_archive.catchall_archive | resource | +| datadog_logs_archive.logs_archive | resource | +| http.current_order | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +| --------------------------- | ----------------------------------------------------------------------------------------------------------------------- | -------- | ------------ | ---------------- | +| additional_query_tags | Additional tags to include in query for logs for this archive | `list` 
| [] | no | +| catchall | Set to true to enable a catchall for logs unmatched by any queries. This should only be used in one environment/account | `bool` | false | no | +| datadog_aws_account_id | The AWS account ID Datadog's integration servers use for all integrations | `string` | 464622532012 | no | +| enable_glacier_transition | Enable/disable transition to glacier. Has no effect unless `lifecycle_rules_enabled` set to true | `bool` | true | no | +| glacier_transition_days | Number of days after which to transition objects to glacier storage | `number` | 365 | no | +| lifecycle_rules_enabled | Enable/disable lifecycle management rules for s3 objects | `bool` | true | no | +| object_lock_days_archive | Set duration of archive bucket object lock | `number` | 7 | yes | +| object_lock_days_cloudtrail | Set duration of cloudtrail bucket object lock | `number` | 7 | yes | +| object_lock_mode_archive | Set mode of archive bucket object lock | `string` | COMPLIANCE | yes | +| object_lock_mode_cloudtrail | Set mode of cloudtrail bucket object lock | `string` | COMPLIANCE | yes | +| s3_force_destroy | Set to true to delete non-empty buckets when `enabled` is set to false | `bool` | false | for destroy only | + +## Outputs + +| Name | Description | +| ----------------------------- | ----------------------------------------------------------- | +| archive_id | The ID of the environment-specific log archive | +| bucket_arn | The ARN of the bucket used for log archive storage | +| bucket_domain_name | The FQDN of the bucket used for log archive storage | +| bucket_id | The ID (name) of the bucket used for log archive storage | +| bucket_region | The region of the bucket used for log archive storage | +| cloudtrail_bucket_arn | The ARN of the bucket used for cloudtrail log storage | +| cloudtrail_bucket_domain_name | The FQDN of the bucket used for cloudtrail log storage | +| cloudtrail_bucket_id | The ID (name) of the bucket used for cloudtrail log storage | +| catchall_id | The ID of the catchall log archive | + +## References + +- [cloudposse/s3-bucket/aws](https://registry.terraform.io/modules/cloudposse/s3-bucket/aws/latest) - Cloud Posse's S3 + component +- [datadog_logs_archive resource] + (https://registry.terraform.io/providers/DataDog/datadog/latest/docs/resources/logs_archive) - Datadog's provider + documentation for the datadog_logs_archive resource + +[](https://cpco.io/component) diff --git a/modules/datadog-logs-archive/context.tf b/modules/datadog-logs-archive/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/datadog-logs-archive/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. 
+# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 
'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. 
+ Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/datadog-logs-archive/main.tf b/modules/datadog-logs-archive/main.tf new file mode 100644 index 000000000..dfc9726de --- /dev/null +++ b/modules/datadog-logs-archive/main.tf @@ -0,0 +1,358 @@ + +locals { + enabled = module.this.enabled + + aws_account_id = join("", data.aws_caller_identity.current.*.account_id) + aws_partition = join("", data.aws_partition.current.*.partition) + + datadog_aws_role_name = nonsensitive(join("", data.aws_ssm_parameter.datadog_aws_role_name.*.value)) + principal_names = [ + format("arn:${local.aws_partition}:iam::%s:role/${local.datadog_aws_role_name}", local.aws_account_id), + ] + + privileged_principal_arns = [ + { + (local.principal_names[0]) = [""] + } + ] + + # in case enabled: false and we have no current order to lookup + data_current_order_body = one(data.http.current_order.*.response_body) == null ? {} : jsondecode(data.http.current_order[0].response_body) + # in case there is no response (valid http request but no existing data) + current_order_data = lookup(local.data_current_order_body, "data", null) + + non_catchall_ids = local.enabled ? [for x in local.current_order_data : x.id if x.attributes.name != "catchall"] : [] + catchall_id = local.enabled ? [for x in local.current_order_data : x.id if x.attributes.name == "catchall"] : [] + ordered_ids = concat(local.non_catchall_ids, local.catchall_id) + + policy = local.enabled ? 
jsondecode(data.aws_iam_policy_document.default[0].json) : null +} + +# We use the http data source due to lack of a data source for datadog_logs_archive_order +# While the data source does exist, it doesn't provide useful information, nor how to lookup the id of a log archive order +# This fetches the current order from DD's api so we can shuffle it around if needed to +# keep the catchall in last place. +data "http" "current_order" { + count = local.enabled ? 1 : 0 + + url = format("https://api.%s/api/v2/logs/config/archives", module.datadog_configuration.datadog_site) + depends_on = [datadog_logs_archive.logs_archive, datadog_logs_archive.catchall_archive] + request_headers = { + Accept = "application/json", + DD-API-KEY = local.datadog_api_key, + DD-APPLICATION-KEY = local.datadog_app_key + } +} + +# IAM policy document to allow cloudtrail to read and write to the +# cloudtrail bucket + +data "aws_iam_policy_document" "default" { + count = module.this.enabled ? 1 : 0 + statement { + sid = "AWSCloudTrailAclCheck" + principals { + type = "Service" + identifiers = ["cloudtrail.amazonaws.com"] + } + + actions = [ + "s3:GetBucketAcl", + ] + + resources = [ + "arn:${local.aws_partition}:s3:::${module.this.id}-cloudtrail", + ] + } + + # We're using two AWSCloudTrailWrite statements with the only + # difference being the principals identifier to avoid a bug + # where TF frequently wants to reorder multiple principals + statement { + sid = "AWSCloudTrailWrite1" + principals { + type = "Service" + identifiers = ["cloudtrail.amazonaws.com"] + } + + actions = [ + "s3:PutObject", + ] + + resources = [ + "arn:${local.aws_partition}:s3:::${module.this.id}-cloudtrail/*", + ] + + condition { + test = "StringEquals" + variable = "s3:x-amz-acl" + values = [ + "bucket-owner-full-control", + ] + } + condition { + test = "StringLike" + variable = "aws:SourceArn" + values = [ + "arn:${local.aws_partition}:cloudtrail:*:${local.aws_account_id}:trail/*datadog-logs-archive", + ] + } + + } + + # We're using two AWSCloudTrailWrite statements with the only + # difference being the principals identifier to avoid a bug + # where TF frequently wants to reorder multiple principals + statement { + sid = "AWSCloudTrailWrite2" + principals { + type = "Service" + identifiers = ["config.amazonaws.com"] + } + + actions = [ + "s3:PutObject", + ] + + resources = [ + "arn:${local.aws_partition}:s3:::${module.this.id}-cloudtrail/*", + ] + + condition { + test = "StringEquals" + variable = "s3:x-amz-acl" + values = [ + "bucket-owner-full-control", + ] + } + condition { + test = "StringLike" + variable = "aws:SourceArn" + values = [ + "arn:${local.aws_partition}:cloudtrail:*:${local.aws_account_id}:trail/*datadog-logs-archive", + ] + } + + } +} + +module "bucket_policy" { + source = "cloudposse/iam-policy/aws" + version = "1.0.1" + + iam_policy_statements = try(lookup(local.policy, "Statement"), null) + + context = module.this.context +} + +data "aws_ssm_parameter" "datadog_aws_role_name" { + name = "/datadog/aws_role_name" +} + +data "aws_caller_identity" "current" { + count = local.enabled ? 1 : 0 +} + +data "aws_partition" "current" { + count = local.enabled ? 1 : 0 +} + +module "archive_bucket" { + source = "cloudposse/s3-bucket/aws" + version = "3.1.2" + + count = local.enabled ? 
1 : 0 + + acl = "private" + enabled = local.enabled + force_destroy = var.s3_force_destroy + + lifecycle_rules = [ + { + prefix = null + enabled = var.lifecycle_rules_enabled + tags = {} + + abort_incomplete_multipart_upload_days = null + enable_glacier_transition = var.enable_glacier_transition + glacier_transition_days = var.glacier_transition_days + noncurrent_version_glacier_transition_days = 30 + enable_deeparchive_transition = false + deeparchive_transition_days = 0 + noncurrent_version_deeparchive_transition_days = 0 + enable_standard_ia_transition = false + standard_transition_days = 0 + enable_current_object_expiration = false + expiration_days = 0 + enable_noncurrent_version_expiration = false + noncurrent_version_expiration_days = 0 + }, + ] + + privileged_principal_actions = [ + "s3:PutObject", + "s3:GetObject", + "s3:ListBucket", + ] + + privileged_principal_arns = local.privileged_principal_arns + + tags = { + managed-by = "terraform" + env = var.stage + service = "datadog-logs-archive" + part-of = "observability" + } + + user_enabled = false + versioning_enabled = true + + object_lock_configuration = { + mode = var.object_lock_mode_archive + days = var.object_lock_days_archive + years = null + } + + context = module.this.context +} + +module "cloudtrail_s3_bucket" { + source = "cloudposse/s3-bucket/aws" + version = "3.1.2" + + depends_on = [data.aws_iam_policy_document.default] + + count = local.enabled ? 1 : 0 + + name = "datadog-logs-archive-cloudtrail" + acl = "private" + enabled = local.enabled + force_destroy = var.s3_force_destroy + + source_policy_documents = data.aws_iam_policy_document.default.*.json + + lifecycle_rules = [ + { + prefix = null + enabled = var.lifecycle_rules_enabled + tags = {} + + abort_incomplete_multipart_upload_days = null + enable_glacier_transition = var.enable_glacier_transition + glacier_transition_days = 365 + noncurrent_version_glacier_transition_days = 365 + enable_deeparchive_transition = false + deeparchive_transition_days = 0 + noncurrent_version_deeparchive_transition_days = 0 + enable_standard_ia_transition = false + standard_transition_days = 0 + enable_current_object_expiration = false + expiration_days = 0 + enable_noncurrent_version_expiration = false + noncurrent_version_expiration_days = 0 + }, + ] + + tags = { + managed-by = "terraform" + env = var.stage + service = "datadog-logs-archive" + part-of = "observability" + } + + user_enabled = false + versioning_enabled = true + + label_key_case = "lower" + label_value_case = "lower" + + object_lock_configuration = { + mode = var.object_lock_mode_cloudtrail + days = var.object_lock_days_cloudtrail + years = null + } + + # Setting this to `true` causes permanent Terraform drift: terraform plan wants to create it, and then the next plan wants to destroy it. + # This happens b/c Terraform sees different MD5 hash of the request body + # https://stackoverflow.com/questions/66605497/terraform-always-says-changes-on-templatefile-for-s3-bucket-policy + # https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketPolicy.html#API_PutBucketPolicy_RequestSyntax + # https://hands-on.cloud/terraform-how-to-enforce-tls-https-for-aws-s3-bucket/ + # https://github.com/hashicorp/terraform/issues/4948 + # https://stackoverflow.com/questions/69986387/s3-bucket-terraform-plan-shows-inexistent-changes-on-default-values + # https://github.com/hashicorp/terraform/issues/5613 + allow_ssl_requests_only = false + + context = module.this.context +} + +module "cloudtrail" { + count = local.enabled ? 
1 : 0 + # We explicitly declare this dependency on the entire + # cloudtrail_s3_bucket module because tf doesn't autodetect the + # dependency on the attachment of the bucket policy, leading to + # insufficient permissions issues on cloudtrail creation if it + # happens to be attempted prior to completion of the policy attachment. + depends_on = [module.cloudtrail_s3_bucket] + source = "cloudposse/cloudtrail/aws" + version = "0.21.0" + + enable_log_file_validation = true + include_global_service_events = false + is_multi_region_trail = false + enabled = local.enabled + enable_logging = true + s3_bucket_name = module.cloudtrail_s3_bucket[0].bucket_id + + event_selector = [ + { + include_management_events = true + read_write_type = "WriteOnly" + data_resource = [ + { + type = "AWS::S3::Object" + values = ["${module.archive_bucket[0].bucket_arn}/"] + } + ] + } + ] + + context = module.this.context +} + +resource "datadog_logs_archive_order" "archive_order" { + count = var.enabled ? 1 : 0 + archive_ids = local.ordered_ids +} + +resource "datadog_logs_archive" "logs_archive" { + count = local.enabled ? 1 : 0 + + name = var.stage + include_tags = true + rehydration_tags = ["rehydrated:true"] + query = join(" OR ", concat([join(":", ["env", var.stage]), join(":", ["account", local.aws_account_id])], var.additional_query_tags)) + + s3_archive { + bucket = module.archive_bucket[0].bucket_id + path = "/" + account_id = local.aws_account_id + role_name = local.datadog_aws_role_name + } +} + +resource "datadog_logs_archive" "catchall_archive" { + count = local.enabled && var.catchall_enabled ? 1 : 0 + + depends_on = [datadog_logs_archive.logs_archive] + name = "catchall" + include_tags = true + rehydration_tags = ["rehydrated:true"] + query = "*" + + s3_archive { + bucket = module.archive_bucket[0].bucket_id + path = "/catchall" + account_id = local.aws_account_id + role_name = local.datadog_aws_role_name + } +} diff --git a/modules/datadog-logs-archive/outputs.tf b/modules/datadog-logs-archive/outputs.tf new file mode 100644 index 000000000..ca2157ddf --- /dev/null +++ b/modules/datadog-logs-archive/outputs.tf @@ -0,0 +1,44 @@ +output "bucket_arn" { + value = local.enabled ? module.archive_bucket[0].bucket_arn : "" + description = "The ARN of the bucket used for log archive storage" +} + +output "bucket_domain_name" { + value = local.enabled ? module.archive_bucket[0].bucket_domain_name : "" + description = "The FQDN of the bucket used for log archive storage" +} + +output "bucket_id" { + value = local.enabled ? module.archive_bucket[0].bucket_id : "" + description = "The ID (name) of the bucket used for log archive storage" +} + +output "bucket_region" { + value = local.enabled ? module.archive_bucket[0].bucket_region : "" + description = "The region of the bucket used for log archive storage" +} + +output "cloudtrail_bucket_arn" { + value = local.enabled ? module.cloudtrail_s3_bucket[0].bucket_arn : "" + description = "The ARN of the bucket used for access logging via cloudtrail" +} + +output "cloudtrail_bucket_domain_name" { + value = local.enabled ? module.cloudtrail_s3_bucket[0].bucket_domain_name : "" + description = "The FQDN of the bucket used for access logging via cloudtrail" +} + +output "cloudtrail_bucket_id" { + value = local.enabled ? module.cloudtrail_s3_bucket[0].bucket_id : "" + description = "The ID (name) of the bucket used for access logging via cloudtrail" +} + +output "archive_id" { + value = local.enabled ? 
datadog_logs_archive.logs_archive[0].id : "" + description = "The ID of the environment-specific log archive" +} + +output "catchall_id" { + value = local.enabled && var.catchall_enabled ? datadog_logs_archive.catchall_archive[0].id : "" + description = "The ID of the catchall log archive" +} diff --git a/modules/datadog-logs-archive/provider-datadog.tf b/modules/datadog-logs-archive/provider-datadog.tf new file mode 100644 index 000000000..56729e3c5 --- /dev/null +++ b/modules/datadog-logs-archive/provider-datadog.tf @@ -0,0 +1,17 @@ +module "datadog_configuration" { + source = "../datadog-configuration/modules/datadog_keys" + enabled = true + context = module.this.context +} + +locals { + datadog_api_key = module.datadog_configuration.datadog_api_key + datadog_app_key = module.datadog_configuration.datadog_app_key +} + +provider "datadog" { + api_key = local.datadog_api_key + app_key = local.datadog_app_key + api_url = module.datadog_configuration.datadog_api_url + validate = local.enabled +} diff --git a/modules/datadog-logs-archive/providers.tf b/modules/datadog-logs-archive/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/datadog-logs-archive/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/datadog-logs-archive/variables.tf b/modules/datadog-logs-archive/variables.tf new file mode 100644 index 000000000..d2e59e024 --- /dev/null +++ b/modules/datadog-logs-archive/variables.tf @@ -0,0 +1,64 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "additional_query_tags" { + type = list(any) + description = "Additional tags to be used in the query for this archive" + default = [] +} + +variable "catchall_enabled" { + type = bool + description = "Set to true to enable a catchall for logs unmatched by any queries. This should only be used in one environment/account" + default = false +} + +variable "lifecycle_rules_enabled" { + type = bool + description = "Enable/disable lifecycle management rules for log archive s3 objects" + default = true +} + +variable "enable_glacier_transition" { + type = bool + description = "Enable/disable transition to glacier for log archive bucket. Has no effect unless lifecycle_rules_enabled set to true" + default = true +} + +variable "glacier_transition_days" { + type = number + description = "Number of days after which to transition objects to glacier storage in log archive bucket" + default = 365 +} + +variable "object_lock_days_archive" { + type = number + description = "Object lock duration for archive buckets in days" + default = 7 +} + +variable "object_lock_days_cloudtrail" { + type = number + description = "Object lock duration for cloudtrail buckets in days" + default = 7 +} + +variable "object_lock_mode_archive" { + type = string + description = "Object lock mode for archive bucket. 
Possible values are COMPLIANCE or GOVERNANCE" + default = "COMPLIANCE" +} + +variable "object_lock_mode_cloudtrail" { + type = string + description = "Object lock mode for cloudtrail bucket. Possible values are COMPLIANCE or GOVERNANCE" + default = "COMPLIANCE" +} + +variable "s3_force_destroy" { + type = bool + description = "Set to true to delete non-empty buckets when enabled is set to false" + default = false +} diff --git a/modules/datadog-logs-archive/versions.tf b/modules/datadog-logs-archive/versions.tf new file mode 100644 index 000000000..372817512 --- /dev/null +++ b/modules/datadog-logs-archive/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 0.13.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.9.0" + } + datadog = { + source = "datadog/datadog" + version = ">= 3.19" + } + http = { + source = "hashicorp/http" + version = ">= 2.1.0" + } + } +} diff --git a/modules/datadog-monitor/CHANGELOG.md b/modules/datadog-monitor/CHANGELOG.md new file mode 100644 index 000000000..7ca47e3d6 --- /dev/null +++ b/modules/datadog-monitor/CHANGELOG.md @@ -0,0 +1,17 @@ +## PR [#814](https://github.com/cloudposse/terraform-aws-components/pull/814) + +### Removed Dead Code, Possible Breaking Change + +The following inputs were removed because they no longer have any effect: + +- datadog_api_secret_key +- datadog_app_secret_key +- datadog_secrets_source_store_account +- monitors_roles_map +- role_paths +- secrets_store_type + +Except for `monitors_roles_map` and `role_paths`, these inputs were deprecated in an earlier PR, and replaced with +outputs from `datadog-configuration`. + +The implementation of `monitors_roles_map` and `role_paths` has been lost. diff --git a/modules/datadog-monitor/README.md b/modules/datadog-monitor/README.md index 171d003a5..2a0543abe 100644 --- a/modules/datadog-monitor/README.md +++ b/modules/datadog-monitor/README.md @@ -1,9 +1,16 @@ +--- +tags: + - component/datadog-monitor + - layer/datadog + - provider/aws + - provider/datadog +--- + # Component: `datadog-monitor` -This component is responsible for provisioning Datadog monitors and assigning Datadog roles to the monitors. +This component is responsible for provisioning Datadog monitors and assigning Datadog roles to the monitors. -It's required that the DataDog API and APP secret keys are available in the consuming account at the `var.datadog_api_secret_key` -and `var.datadog_app_secret_key` paths in the AWS SSM Parameter Store. +It depends on the `datadog-configuration` component to get the Datadog API keys. ## Usage @@ -20,57 +27,200 @@ components: workspace_enabled: true vars: enabled: true - secrets_store_type: SSM local_datadog_monitors_config_paths: - "catalog/monitors/dev/*.yaml" - # Assign roles to monitors to allow/restrict access - monitors_roles_map: - aurora-replica-lag-dev: - - "corporate-it-dev" - - "development-dev" - - "site-reliability-dev" - ec2-failed-status-check-dev: - - "corporate-it-dev" - - "development-dev" - - "site-reliability-dev" ``` +## Conventions + +- Treat datadog like a separate cloud provider with integrations + ([datadog-integration](https://docs.cloudposse.com/components/library/aws/datadog-integration)) into your accounts. + +- Use the `catalog` convention to define a step of alerts. You can use ours or define your own. 
+ [https://github.com/cloudposse/terraform-datadog-platform/tree/master/catalog/monitors](https://github.com/cloudposse/terraform-datadog-platform/tree/master/catalog/monitors) + +- The monitors catalog for the datadog-monitor component support datadog monitor exports. You can use + [the status page of a monitor to export it from 'settings'](https://docs.datadoghq.com/monitors/manage/status/#settings). + You can add the export to existing files or make new ones. Because the export is json formatted, it's also yaml + compatible. If you prefer, you can convert the export to yaml using your text editor or a cli tool like `yq`. + +## Adjust Thresholds per Stack + +Since there are so many parameters that may be adjusted for a given monitor, we define all monitors through YAML. By +convention, we define the **default monitors** that should apply to all environments, and then adjust the thresholds per +environment. This is accomplished using the `datadog-monitor` components variable `local_datadog_monitors_config_paths` +which defines the path to the YAML configuration files. By passing a path for `dev` and `prod`, we can define +configurations that are different per environment. + +For example, you might have the following settings defined for `prod` and `dev` stacks that override the defaults. + +For the `dev` stack: + +``` +components: + terraform: + datadog-monitor: + vars: + # Located in the components/terraform/datadog-monitor directory + local_datadog_monitors_config_paths: + - catalog/monitors/*.yaml + - catalog/monitors/dev/*.yaml # note this line +``` + +For `prod` stack: + +``` +components: + terraform: + datadog-monitor: + vars: + # Located in the components/terraform/datadog-monitor directory + local_datadog_monitors_config_paths: + - catalog/monitors/*.yaml + - catalog/monitors/prod/*.yaml # note this line +``` + +Behind the scenes (with `atmos`) we fetch all files from these glob patterns, template them, and merge them by key. 
If +we peek into the `*.yaml` and `dev/*.yaml` files above you could see an example like this: + +**components/terraform/datadog-monitor/catalog/monitors/elb.yaml** + +``` +elb-lb-httpcode-5xx-notify: + name: "(ELB) {{ env }} HTTP 5XX client error detected" + type: query alert + query: | + avg(last_15m):max:aws.elb.httpcode_elb_5xx{${context_dd_tags}} by {env,host} > 20 + message: | + [${ dd_env }] [ {{ env }} ] lb:[ {{host}} ] + {{#is_warning}} + Number of HTTP 5XX client error codes generated by the load balancer > {{warn_threshold}}% + {{/is_warning}} + {{#is_alert}} + Number of HTTP 5XX client error codes generated by the load balancer > {{threshold}}% + {{/is_alert}} + Check LB + escalation_message: "" + tags: {} + options: + renotify_interval: 60 + notify_audit: false + require_full_window: true + include_tags: true + timeout_h: 0 + evaluation_delay: 60 + new_host_delay: 300 + new_group_delay: 0 + groupby_simple_monitor: false + renotify_occurrences: 0 + renotify_statuses: [] + validate: true + notify_no_data: false + no_data_timeframe: 5 + priority: 3 + threshold_windows: {} + thresholds: + critical: 50 + warning: 20 + priority: 3 + restricted_roles: null +``` + +**components/terraform/datadog-monitor/catalog/monitors/dev/elb.yaml** + +``` +elb-lb-httpcode-5xx-notify: + query: | + avg(last_15m):max:aws.elb.httpcode_elb_5xx{${context_dd_tags}} by {env,host} > 30 + priority: 2 + options: + thresholds: + critical: 30 + warning: 10 +``` + +## Key Notes + +### Inheritance + +The important thing to note here is that the default yaml is applied to every stage that it's deployed to. For dev +specifically however, we want to override the thresholds and priority for this monitor. This merging is done by key of +the monitor, in this case `elb-lb-httpcode-5xx-notify`. + +### Templating + +The second thing to note is `${ dd_env }`. This is **terraform** templating in action. While double braces (`{{ env }}`) +refers to datadog templating, `${ dd_env }` is a template variable we pass into our monitors. in this example we use it +to specify a grouping int he message. This value is passed in and can be overridden via stacks. + +We pass a value via: + +``` +components: + terraform: + datadog-monitor: + vars: + # Located in the components/terraform/datadog-monitor directory + local_datadog_monitors_config_paths: + - catalog/monitors/*.yaml + - catalog/monitors/dev/*.yaml + # templatefile() is used for all yaml config paths with these variables. + datadog_monitors_config_parameters: + dd_env: "dev" +``` + +This allows us to further use inheritance from stack configuration to keep our monitors dry, but configurable. + +Another available option is to use our catalog as base monitors and then override them with your specific fine tuning. + +``` +components: + terraform: + datadog-monitor: + vars: + local_datadog_monitors_config_paths: + - https://raw.githubusercontent.com/cloudposse/terraform-datadog-platform/0.27.0/catalog/monitors/ec2.yaml + - catalog/monitors/ec2.yaml +``` + +## Other Gotchas + +Our integration action that checks for `'source_type_name' equals 'Monitor Alert'` will also be true for synthetics. +Whereas if we check for `'event_type' equals 'query_alert_monitor'`, that's only true for monitors, because synthetics +will only be picked up by an integration action when `event_type` is `synthetics_alert`. + +This is important if we need to distinguish between monitors and synthetics in OpsGenie, which is the case when we want +to ensure clean messaging on OpsGenie incidents in Statuspage. 
+ + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | | [datadog](#requirement\_datadog) | >= 3.3.0 | ## Providers -| Name | Version | -|------|---------| -| [aws](#provider\_aws) | ~> 4.0 | +No providers. ## Modules | Name | Source | Version | |------|--------|---------| -| [datadog\_monitors](#module\_datadog\_monitors) | cloudposse/platform/datadog//modules/monitors | 1.0.0 | -| [datadog\_monitors\_merge](#module\_datadog\_monitors\_merge) | cloudposse/config/yaml//modules/deepmerge | 1.0.1 | +| [datadog\_configuration](#module\_datadog\_configuration) | ../datadog-configuration/modules/datadog_keys | n/a | +| [datadog\_monitors](#module\_datadog\_monitors) | cloudposse/platform/datadog//modules/monitors | 1.4.1 | +| [datadog\_monitors\_merge](#module\_datadog\_monitors\_merge) | cloudposse/config/yaml//modules/deepmerge | 1.0.2 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [local\_datadog\_monitors\_yaml\_config](#module\_local\_datadog\_monitors\_yaml\_config) | cloudposse/config/yaml | 1.0.1 | -| [remote\_datadog\_monitors\_yaml\_config](#module\_remote\_datadog\_monitors\_yaml\_config) | cloudposse/config/yaml | 1.0.1 | +| [local\_datadog\_monitors\_yaml\_config](#module\_local\_datadog\_monitors\_yaml\_config) | cloudposse/config/yaml | 1.0.2 | +| [remote\_datadog\_monitors\_yaml\_config](#module\_remote\_datadog\_monitors\_yaml\_config) | cloudposse/config/yaml | 1.0.2 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources -| Name | Type | -|------|------| -| [aws_secretsmanager_secret.datadog_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret) | data source | -| [aws_secretsmanager_secret.datadog_app_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret) | data source | -| [aws_secretsmanager_secret_version.datadog_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret_version) | data source | -| [aws_secretsmanager_secret_version.datadog_app_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret_version) | data source | -| [aws_ssm_parameter.datadog_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | -| [aws_ssm_parameter.datadog_app_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +No resources. ## Inputs @@ -81,20 +231,15 @@ components: | [alert\_tags\_separator](#input\_alert\_tags\_separator) | Separator for the alert tags. All strings from the `alert_tags` variable will be joined into one string using the separator and then added to the alert message | `string` | `"\n"` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | -| [datadog\_api\_secret\_key](#input\_datadog\_api\_secret\_key) | The key of the Datadog API secret | `string` | `"datadog/datadog_api_key"` | no | -| [datadog\_app\_secret\_key](#input\_datadog\_app\_secret\_key) | The key of the Datadog Application secret | `string` | `"datadog/datadog_app_key"` | no | | [datadog\_monitor\_context\_tags](#input\_datadog\_monitor\_context\_tags) | List of context tags to add to each monitor | `set(string)` |
[
"namespace",
"tenant",
"environment",
"stage"
]
| no | | [datadog\_monitor\_context\_tags\_enabled](#input\_datadog\_monitor\_context\_tags\_enabled) | Whether to add context tags to each monitor | `bool` | `true` | no | | [datadog\_monitor\_globals](#input\_datadog\_monitor\_globals) | Global parameters to add to each monitor | `any` | `{}` | no | | [datadog\_monitors\_config\_parameters](#input\_datadog\_monitors\_config\_parameters) | Map of parameters to Datadog monitor configurations | `map(any)` | `{}` | no | -| [datadog\_secrets\_source\_store\_account](#input\_datadog\_secrets\_source\_store\_account) | Account (stage) holding Secret Store for Datadog API and app keys. | `string` | `"corp"` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to the `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -102,15 +247,12 @@ components: | [local\_datadog\_monitors\_config\_paths](#input\_local\_datadog\_monitors\_config\_paths) | List of paths to local Datadog monitor configurations | `list(string)` | `[]` | no | | [message\_postfix](#input\_message\_postfix) | Additional information to put after each monitor message | `string` | `""` | no | | [message\_prefix](#input\_message\_prefix) | Additional information to put before each monitor message | `string` | `""` | no | -| [monitors\_roles\_map](#input\_monitors\_roles\_map) | Map of Datadog monitor names to a set of Datadog role names to restrict access to the monitors | `map(set(string))` | `{}` | no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | | [remote\_datadog\_monitors\_base\_path](#input\_remote\_datadog\_monitors\_base\_path) | Base path to remote Datadog monitor configurations | `string` | `""` | no | | [remote\_datadog\_monitors\_config\_paths](#input\_remote\_datadog\_monitors\_config\_paths) | List of paths to remote Datadog monitor configurations | `list(string)` | `[]` | no | -| [role\_paths](#input\_role\_paths) | List of paths to Datadog role configurations | `list(string)` | `[]` | no | -| [secrets\_store\_type](#input\_secrets\_store\_type) | Secret store type for Datadog API and app keys. Valid values: `SSM`, `ASM` | `string` | `"SSM"` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | @@ -121,10 +263,19 @@ components: |------|-------------| | [datadog\_monitor\_names](#output\_datadog\_monitor\_names) | Names of the created Datadog monitors | + + +## Related How-to Guides + +- [How to Monitor Everything with Datadog](https://docs.cloudposse.com/layers/monitoring/datadog/) + +## Component Dependencies +- [datadog-integration](https://docs.cloudposse.com/components/library/aws/datadog-integration/) ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/datadog-monitor) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/datadog-monitor) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/datadog-monitor/asm.tf b/modules/datadog-monitor/asm.tf deleted file mode 100644 index d3ba132c8..000000000 --- a/modules/datadog-monitor/asm.tf +++ /dev/null @@ -1,19 +0,0 @@ -data "aws_secretsmanager_secret" "datadog_api_key" { - count = local.asm_enabled ? 1 : 0 - name = var.datadog_api_secret_key -} - -data "aws_secretsmanager_secret_version" "datadog_api_key" { - count = local.asm_enabled ? 1 : 0 - secret_id = data.aws_secretsmanager_secret.datadog_api_key[0].id -} - -data "aws_secretsmanager_secret" "datadog_app_key" { - count = local.asm_enabled ? 1 : 0 - name = var.datadog_app_secret_key -} - -data "aws_secretsmanager_secret_version" "datadog_app_key" { - count = local.asm_enabled ? 
1 : 0 - secret_id = data.aws_secretsmanager_secret.datadog_app_key[0].id -} diff --git a/modules/datadog-monitor/catalog/monitors/aurora.yaml b/modules/datadog-monitor/catalog/monitors/aurora.yaml index 12b9cfb5a..6efa79fb5 100644 --- a/modules/datadog-monitor/catalog/monitors/aurora.yaml +++ b/modules/datadog-monitor/catalog/monitors/aurora.yaml @@ -5,8 +5,9 @@ aurora-replica-lag: name: "(RDS) ${tenant} ${stage} - Aurora Replica Lag Detected" type: metric alert query: | - min(last_15m):min:aws.rds.aurora_replica_lag{stage:${ stage }} by {dbinstanceidentifier} > 1000 + min(last_15m):min:aws.rds.aurora_replica_lag{stage:${ stage }} by {dbinstanceidentifier,stage,tenant,environment,team} > 1000 message: | + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) {{#is_warning}} ({dbinstanceidentifier}) Replica lag has been greater than half a second for more than 15 minutes {{/is_warning}} @@ -16,23 +17,23 @@ aurora-replica-lag: escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: false - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 1000 - warning: 500 - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: false + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 1000 + warning: 500 + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null diff --git a/modules/datadog-monitor/catalog/monitors/ec2.yaml b/modules/datadog-monitor/catalog/monitors/ec2.yaml index 3c297366d..7a29fc85e 100644 --- a/modules/datadog-monitor/catalog/monitors/ec2.yaml +++ b/modules/datadog-monitor/catalog/monitors/ec2.yaml @@ -5,29 +5,29 @@ ec2-failed-status-check: name: "(EC2) ${tenant} ${ stage } - Failed Status Check" type: metric alert query: | - avg(last_10m):avg:aws.ec2.status_check_failed{stage:${ stage }} by {instance_id} > 0 + avg(last_10m):avg:aws.ec2.status_check_failed{stage:${ stage }} by {instance_id,stage,tenant,environment,team} > 0 message: | - ({stage} {region}) {instance_id} failed a status check + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) {{instance_id}} failed a status check escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 0 - #warning: - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 0 + #warning: + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null diff --git a/modules/datadog-monitor/catalog/monitors/efs.yaml b/modules/datadog-monitor/catalog/monitors/efs.yaml index 80880d944..9453b79c4 100644 --- 
a/modules/datadog-monitor/catalog/monitors/efs.yaml +++ b/modules/datadog-monitor/catalog/monitors/efs.yaml @@ -5,32 +5,32 @@ efs-throughput-utilization-check: name: "(EFS) ${tenant} ${ stage } - % Throughput Utilization" type: metric alert query: | - avg(last_1h):(sum:aws.efs.metered_iobytes{stage:${ stage }} by {filesystemid} * 100 / 1048576) / (sum:aws.efs.permitted_throughput{stage:${ stage }} by {filesystemid} / 1048576) > 75 + avg(last_1h):(sum:aws.efs.metered_iobytes{stage:${ stage }} by {filesystemid} * 100 / 1048576) / (sum:aws.efs.permitted_throughput{stage:${ stage }} by {filesystemid,stage,tenant,environment,team} / 1048576) > 75 message: | - ({stage} {region}) {filesystemid} Throughput Utilization is too high + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) {{filesystemid}} Throughput Utilization is too high escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: false - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 75 - warning: 50 - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: false + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 75 + warning: 50 + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null # The official Datadog API documentation with available query parameters & alert types: # https://docs.datadoghq.com/api/v1/monitors/#create-a-monitor @@ -39,92 +39,91 @@ efs-burst-balance: name: "(EFS) ${tenant} ${ stage } - Burst Balance Low (< 100 GB)" type: metric alert query: | - min(last_1h):avg:aws.efs.burst_credit_balance{stage:${ stage }} by {filesystemid} < 100000000000 + min(last_1h):avg:aws.efs.burst_credit_balance{stage:${ stage }} by {filesystemid,stage,tenant,environment,team} < 100000000000 message: | - ({stage} {region}) {filesystemid} EFS Burst Balance for {filesystemid} dipped below 100 GB. + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) {{filesystemid}} EFS Burst Balance for {{filesystemid}} dipped below 100 GB. 
escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: false - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 100000000000 # 100 GB - warning: 1000000000000 # 1TB - #unknown: - #ok: - #critical_recovery: - #warning_recovery: - + options: + notify_no_data: false + notify_audit: true + require_full_window: false + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 100000000000 # 100 GB + warning: 1000000000000 # 1TB + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null efs-io-percent-limit: name: "(EFS) ${tenant} ${ stage } - I/O limit has been reached (> 90%)" type: metric alert query: | - max(last_1h):avg:aws.efs.percent_iolimit{stage:${ stage }} by {filesystemid} > 90 + max(last_1h):avg:aws.efs.percent_iolimit{stage:${ stage }} by {filesystemid,stage,tenant,environment,team} > 90 message: | - ({stage} {region}) {filesystemid} EFS I/O limit has been reached for fs {filesystemid}. + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) {{filesystemid}} EFS I/O limit has been reached for fs {{filesystemid}}. escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: false - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 90 - warning: 50 - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: false + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 90 + warning: 50 + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null efs-client-connection-anomaly: name: "(EFS) ${tenant} ${ stage } - Client Connection Anomaly" type: metric alert query: | - avg(last_4h):anomalies(avg:aws.efs.client_connections{stage:${ stage }} by {aws_account,filesystemid,name}.as_count(), 'basic', 2, direction='both', alert_window='last_15m', interval=60, count_default_zero='true') >= 1 + avg(last_4h):anomalies(avg:aws.efs.client_connections{stage:${ stage }} by {aws_account,filesystemid,name,stage,tenant,environment,team}.as_count(), 'basic', 2, direction='both', alert_window='last_15m', interval=60, count_default_zero='true') >= 1 message: | - ({stage} {region}) [{name}] EFS Client Connection Anomoly for filesystem {filesystemid}. + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{name}}] EFS Client Connection Anomaly for filesystem {{filesystemid}}. 
escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: false - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 1 - critical_recovery: 0 - #warning: - #unknown: - #ok: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: false + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 1 + critical_recovery: 0 + #warning: + #unknown: + #ok: + #warning_recovery: + priority: 3 + restricted_roles: null diff --git a/modules/datadog-monitor/catalog/monitors/elb.yaml b/modules/datadog-monitor/catalog/monitors/elb.yaml index e252c7071..0b38f34d1 100644 --- a/modules/datadog-monitor/catalog/monitors/elb.yaml +++ b/modules/datadog-monitor/catalog/monitors/elb.yaml @@ -2,9 +2,9 @@ elb-lb-httpcode-5xx-notify: name: "(ELB) ${tenant} ${ stage } HTTP 5XX client error detected" type: query alert query: | - avg(last_15m):max:aws.elb.httpcode_elb_5xx{${context_dd_tags}} by {env,host} > 50 + avg(last_15m):max:aws.elb.httpcode_elb_5xx{${context_dd_tags}} by {env,host,stage,tenant,environment,team} > 50 message: | - [${ stage }] [ {{ env }} ] lb:[ {{host}} ] + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) lb:[ {{host}} ] {{#is_warning}} Number of HTTP 5XX client error codes generated by the load balancer > {{warn_threshold}}% {{/is_warning}} @@ -15,19 +15,19 @@ elb-lb-httpcode-5xx-notify: escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: {} - thresholds: - critical: 50 - warning: 20 + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: {} + thresholds: + critical: 50 + warning: 20 + priority: 3 + restricted_roles: null diff --git a/modules/datadog-monitor/catalog/monitors/host.yaml b/modules/datadog-monitor/catalog/monitors/host.yaml index edc61520b..2a7cb94f0 100644 --- a/modules/datadog-monitor/catalog/monitors/host.yaml +++ b/modules/datadog-monitor/catalog/monitors/host.yaml @@ -4,115 +4,115 @@ host-io-wait-times: name: "(Host) ${tenant} ${ stage } - I/O Wait Times" type: metric alert - query: "avg(last_10m):avg:system.cpu.iowait{stage:${ stage }} by {host} > 50" + query: "avg(last_10m):avg:system.cpu.iowait{stage:${ stage }} by {host,stage,tenant,environment,team} > 50" message: |- The I/O wait time for ({{host.name}} {{host.ip}}) is very high escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 50 - warning: 30 + options: + notify_no_data: false + notify_audit: true + require_full_window: true + 
include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 50 + warning: 30 + priority: 3 + restricted_roles: null host-disk-use: name: "(Host) ${tenant} ${ stage } - Host Disk Usage" type: metric alert - query: "avg(last_30m):(avg:system.disk.total{stage:${ stage }} by {host} - avg:system.disk.free{stage:${ stage }} by {host}) / avg:system.disk.total{stage:${ stage }} by {host} * 100 > 90" + query: "avg(last_30m):(avg:system.disk.total{stage:${ stage }} by {host,stage,tenant,environment,team} - avg:system.disk.free{stage:${ stage }} by {host}) / avg:system.disk.total{stage:${ stage }} by {host} * 100 > 90" message: |- Disk Usage has been above threshold over 30 minutes on ({{host.name}} {{host.ip}}) escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 90 - warning: 80 - #unknown: - #ok: - critical_recovery: 85 - warning_recovery: 75 + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 90 + warning: 80 + #unknown: + #ok: + critical_recovery: 85 + warning_recovery: 75 + priority: 3 + restricted_roles: null host-high-mem-use: name: "(Host) ${tenant} ${ stage } - Memory Utilization" type: query alert - query: "avg(last_15m):avg:system.mem.pct_usable{stage:${ stage }} by {host} < 0.1" + query: "avg(last_15m):avg:system.mem.pct_usable{stage:${ stage }} by {host,stage,tenant,environment,team} < 0.1" message: |- Running out of free memory on ({{host.name}} {{host.ip}}) escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 0.1 - warning: 0.15 - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 0.1 + warning: 0.15 + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null host-high-load-avg: name: "(Host) ${tenant} ${ stage } - High System Load Average" type: metric alert - query: "avg(last_30m):avg:system.load.norm.5{stage:${ stage }} by {host} > 0.8" + query: "avg(last_30m):avg:system.load.norm.5{stage:${ stage }} by {host,stage,tenant,environment,team} > 0.8" message: |- - Load average is high on ({{host.name}} {{host.ip}}) + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) Load average is high on ({{host.name}} {{host.ip}}) escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - 
renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 0.8 - warning: 0.75 - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 0.8 + warning: 0.75 + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null diff --git a/modules/datadog-monitor/catalog/monitors/k8s.yaml b/modules/datadog-monitor/catalog/monitors/k8s.yaml index 5ff999311..349ea221c 100644 --- a/modules/datadog-monitor/catalog/monitors/k8s.yaml +++ b/modules/datadog-monitor/catalog/monitors/k8s.yaml @@ -5,354 +5,354 @@ k8s-deployment-replica-pod-down: name: "(k8s) ${tenant} ${ stage } - Deployment Replica Pod is down" type: query alert query: | - avg(last_15m):avg:kubernetes_state.deployment.replicas_desired{stage:${ stage }} by {cluster_name,deployment} - avg:kubernetes_state.deployment.replicas_ready{stage:${ stage }} by {cluster_name,deployment} >= 2 + avg(last_15m):avg:kubernetes_state.deployment.replicas_desired{stage:${ stage }} by {cluster_name,deployment,stage,tenant,environment,team} - avg:kubernetes_state.deployment.replicas_ready{stage:${ stage }} by {cluster_name,deployment,stage,tenant,environment,team} >= 2 message: | - ({{cluster_name.name}}) More than one Deployments Replica's pods are down on {{deployment.name}} + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{cluster_name.name}}] More than one Deployments Replica's pods are down on {{deployment.name}} escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 0 - timeout_h: 0 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 5 - threshold_windows: { } - thresholds: - critical: 2 + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 0 + timeout_h: 0 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 5 + threshold_windows: { } + thresholds: + critical: 2 + priority: 3 + restricted_roles: null k8s-pod-restarting: name: "(k8s) ${tenant} ${ stage } - Pods are restarting multiple times" type: query alert query: | - change(sum(last_5m),last_5m):exclude_null(avg:kubernetes.containers.restarts{stage:${ stage }} by {cluster_name,kube_namespace,pod_name}) > 5 + change(sum(last_5m),last_5m):exclude_null(avg:kubernetes.containers.restarts{stage:${ stage }} by {cluster_name,kube_namespace,pod_name,stage,tenant,environment,team}) > 5 message: | - ({{cluster_name.name}}) pod {{pod_name.name}} is restarting multiple times on {{kube_namespace.name}} + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{cluster_name.name}}] pod {{pod_name.name}} is restarting multiple times on {{kube_namespace.name}} escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 0 - timeout_h: 0 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 5 - warning: 3 + 
options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 0 + timeout_h: 0 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 5 + warning: 3 + priority: 3 + restricted_roles: null k8s-statefulset-replica-down: name: "(k8s) ${tenant} ${ stage } - StatefulSet Replica Pod is down" type: query alert query: | - max(last_15m):sum:kubernetes_state.statefulset.replicas_desired{stage:${ stage }} by {cluster_name,kube_namespace,statefulset} - sum:kubernetes_state.statefulset.replicas_ready{stage:${ stage }} by {cluster_name,kube_namespace,statefulset} >= 2 + max(last_15m):sum:kubernetes_state.statefulset.replicas_desired{stage:${ stage }} by {cluster_name,kube_namespace,statefulset,stage,tenant,environment,team} - sum:kubernetes_state.statefulset.replicas_ready{stage:${ stage }} by {cluster_name,kube_namespace,statefulset,stage,tenant,environment,team} >= 2 message: | - ({{cluster_name.name}} {{statefulset.name}}) More than one StatefulSet Replica's pods are down on {{kube_namespace.name}} + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{cluster_name.name}} {{statefulset.name}}] More than one StatefulSet Replica's pods are down on {{kube_namespace.name}} escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: false - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 0 - timeout_h: 0 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - warning: 1 - critical: 2 - + options: + notify_no_data: false + notify_audit: true + require_full_window: false + include_tags: true + renotify_interval: 0 + timeout_h: 0 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + warning: 1 + critical: 2 + priority: 3 + restricted_roles: null k8s-daemonset-pod-down: name: "(k8s) ${tenant} ${ stage } - DaemonSet Pod is down" type: query alert query: | - max(last_15m):sum:kubernetes_state.daemonset.desired{stage:${ stage }} by {cluster_name,kube_namespace,daemonset} - sum:kubernetes_state.daemonset.ready{stage:${ stage }} by {cluster_name,kube_namespace,daemonset} >= 1 + max(last_15m):sum:kubernetes_state.daemonset.desired{stage:${ stage }} by {cluster_name,kube_namespace,daemonset,stage,tenant,environment,team} - sum:kubernetes_state.daemonset.ready{stage:${ stage }} by {cluster_name,kube_namespace,daemonset,stage,tenant,environment,team} >= 1 message: | - ({{cluster_name.name}} {{daemonset.name}}) One or more DaemonSet pods are down on {{kube_namespace.name}} + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{cluster_name.name}} {{daemonset.name}}] One or more DaemonSet pods are down on {{kube_namespace.name}} escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: false - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 0 - timeout_h: 0 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 1 + options: + notify_no_data: false + notify_audit: true + require_full_window: false + include_tags: true + renotify_interval: 0 + timeout_h: 0 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 1 + priority: 
3 + restricted_roles: null k8s-crashloopBackOff: name: "(k8s) ${tenant} ${ stage } - CrashloopBackOff detected" type: query alert query: | - max(last_10m):max:kubernetes_state.container.status_report.count.waiting{stage:${ stage },reason:crashloopbackoff} by {cluster_name,kube_namespace,pod_name} >= 1 + max(last_10m):max:kubernetes_state.container.status_report.count.waiting{stage:${ stage },reason:crashloopbackoff} by {cluster_name,kube_namespace,pod_name,stage,tenant,environment,team} >= 1 message: | - ({{cluster_name.name}}) pod {{pod_name.name}} is CrashloopBackOff on {{kube_namespace.name}} + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{cluster_name.name}}] pod {{pod_name.name}} is CrashloopBackOff on {{kube_namespace.name}} escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: false - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 0 - timeout_h: 0 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 1 + options: + notify_no_data: false + notify_audit: true + require_full_window: false + include_tags: true + renotify_interval: 0 + timeout_h: 0 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 1 + priority: 3 + restricted_roles: null k8s-multiple-pods-failing: name: "(k8s) ${tenant} ${ stage } - Multiple Pods are failing" type: query alert query: | - change(avg(last_5m),last_5m):sum:kubernetes_state.pod.status_phase{stage:${ stage },phase:failed} by {cluster_name,kube_namespace} > 10 + change(avg(last_5m),last_5m):sum:kubernetes_state.pod.status_phase{stage:${ stage },phase:failed} by {cluster_name,kube_namespace,stage,tenant,environment,team} > 10 message: | - ({{cluster_name.name}}) More than ten pods are failing on {{kube_namespace.name}} + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{cluster_name.name}}] More than ten pods are failing on {{kube_namespace.name}} escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 0 - timeout_h: 0 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - warning: 5 - critical: 10 + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 0 + timeout_h: 0 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + warning: 5 + critical: 10 + priority: 3 + restricted_roles: null k8s-unavailable-deployment-replica: name: "(k8s) ${tenant} ${ stage } - Unavailable Deployment Replica(s) detected" type: metric alert query: | - max(last_10m):max:kubernetes_state.deployment.replicas_unavailable{stage:${ stage }} by {cluster_name,kube_namespace} > 0 + max(last_10m):max:kubernetes_state.deployment.replicas_unavailable{stage:${ stage }} by {cluster_name,kube_namespace,stage,tenant,environment,team} > 0 message: | - ({{cluster_name.name}}) Detected unavailable Deployment replicas for longer than 10 minutes on {{kube_namespace.name}} + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{cluster_name.name}}] Detected unavailable Deployment replicas for longer than 10 minutes on {{kube_namespace.name}} escalation_message: "" tags: 
managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: false - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 0 - #warning: - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: false + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 0 + #warning: + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null k8s-unavailable-statefulset-replica: name: "(k8s) ${tenant} ${ stage } - Unavailable Statefulset Replica(s) detected" type: metric alert query: | - max(last_10m):max:kubernetes_state.statefulset.replicas_unavailable{stage:${ stage }} by {cluster_name,kube_namespace} > 0 + max(last_10m):max:kubernetes_state.statefulset.replicas_unavailable{stage:${ stage }} by {cluster_name,kube_namespace,stage,tenant,environment,team} > 0 message: | - ({{cluster_name.name}}) Detected unavailable Statefulset replicas for longer than 10 minutes on {{kube_namespace.name}} + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{cluster_name.name}}] Detected unavailable Statefulset replicas for longer than 10 minutes on {{kube_namespace.name}} escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: false - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 0 - #warning: - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: false + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 0 + #warning: + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null k8s-node-status-unschedulable: name: "(k8s) ${tenant} ${ stage } - Detected Unschedulable Node(s)" type: query alert query: | - max(last_15m):sum:kubernetes_state.node.status{stage:${ stage },status:schedulable} by {cluster_name} * 100 / sum:kubernetes_state.node.status{stage:${ stage }} by {cluster_name} < 80 + max(last_15m):sum:kubernetes_state.node.status{stage:${ stage },status:schedulable} by {cluster_name} * 100 / sum:kubernetes_state.node.status{stage:${ stage }} by {cluster_name,stage,tenant,environment,team} < 80 message: | - More than 20% of nodes are unschedulable on ({{cluster_name}} cluster). \n Keep in mind that this might be expected based on your infrastructure. + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{cluster_name.name}}] More than 20% of nodes are unschedulable on the cluster. \n Keep in mind that this might be expected based on your infrastructure. 
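The same restructuring repeats for every monitor in these catalog files: the formerly flat option list is nested under an `options:` key, the `enable_logs_sample`, `force_delete`, and `locked` flags are dropped, `priority: 3` and `restricted_roles: null` are appended, queries additionally group by `stage,tenant,environment,team`, and messages gain a `({{tenant.name}}-{{environment.name}}-{{stage.name}})` prefix so those tags resolve in notifications. As a rough illustration only (this is a hand-written equivalent, not the internals of the `cloudposse/platform/datadog` monitors module, and the resource name, simplified query, and omitted tag filters are assumptions), one of these entries lands on the provider roughly like this:

```hcl
# Illustrative sketch: where the nested `options:` fields of a catalog entry end up
# on a datadog_monitor resource. Query simplified; stage/tenant filters omitted.
resource "datadog_monitor" "k8s_node_status_unschedulable_example" {
  name    = "(k8s) example - Detected Unschedulable Node(s)"
  type    = "query alert"
  query   = "max(last_15m):sum:kubernetes_state.node.status{status:schedulable} by {cluster_name} * 100 / sum:kubernetes_state.node.status{*} by {cluster_name} < 80"
  message = "More than 20% of nodes are unschedulable on the cluster."

  # Fields the catalog now nests under `options:`
  notify_no_data      = false
  notify_audit        = true
  require_full_window = false
  include_tags        = true
  renotify_interval   = 60
  timeout_h           = 24
  evaluation_delay    = 60
  new_host_delay      = 300
  no_data_timeframe   = 10

  monitor_thresholds {
    critical = 80
    warning  = 90
  }

  # New in this refactor: every monitor carries a priority and an (initially
  # empty) restricted_roles setting.
  priority         = 3
  restricted_roles = null

  tags = ["managed-by:Terraform"]
}
```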
escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: false - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 80 - warning: 90 - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: false + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 80 + warning: 90 + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null k8s-imagepullbackoff: name: "(k8s) ${tenant} ${ stage } - ImagePullBackOff detected" type: "query alert" query: | - max(last_10m):max:kubernetes_state.container.status_report.count.waiting{reason:imagepullbackoff,stage:${ stage }} by {kube_cluster_name,kube_namespace,pod_name} >= 1 + max(last_10m):max:kubernetes_state.container.status_report.count.waiting{reason:imagepullbackoff,stage:${ stage }} by {kube_cluster_name,kube_namespace,pod_name,stage,tenant,environment,team} >= 1 message: | - Pod {{pod_name.name}} is ImagePullBackOff on namespace {{kube_namespace.name}} + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{cluster_name.name}}] Pod {{pod_name.name}} is ImagePullBackOff on namespace {{kube_namespace.name}} escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: false - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 1 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 1 - #warning: - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: false + include_tags: true + renotify_interval: 60 + timeout_h: 1 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 1 + #warning: + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null k8s-high-cpu-usage: name: "(k8s) ${tenant} ${ stage } - High CPU Usage Detected" type: metric alert query: | - avg(last_10m):avg:system.cpu.system{stage:${ stage }} by {host} > 90 + avg(last_10m):avg:system.cpu.system{stage:${ stage }} by {host,stage,tenant,environment,team} > 90 message: | - ({{host.cluster_name}}) High CPU usage for the last 10 minutes on {{host.name}} + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{cluster_name.name}} {{host.cluster_name}}] High CPU usage for the last 10 minutes on {{host.name}} escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 90 - warning: 60 - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + 
new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 90 + warning: 60 + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null k8s-high-disk-usage: name: "(k8s) ${tenant} ${ stage } - High Disk Usage Detected" type: metric alert query: | - min(last_5m):min:system.disk.used{stage:${ stage }} by {host,cluster_name} / avg:system.disk.total{stage:${ stage }} by {host,cluster_name} * 100 > 90 + min(last_5m):min:system.disk.used{stage:${ stage }} by {host,cluster_name,stage,tenant,environment,team} / avg:system.disk.total{stage:${ stage }} by {host,cluster_name,stage,tenant,environment,team} * 100 > 90 message: | - ({{cluster_name.name}}) High disk usage detected on {{host.name}} + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{cluster_name.name}}] High disk usage detected on {{host.name}} escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 90 - warning: 75 - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 90 + warning: 75 + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null k8s-high-memory-usage: name: "(k8s) ${tenant} ${ stage } - High Memory Usage Detected" type: metric alert query: | - avg(last_10m):avg:kubernetes.memory.usage_pct{stage:${ stage }} by {cluster_name} > 90 + avg(last_10m):avg:kubernetes.memory.usage_pct{stage:${ stage }} by {cluster_name,stage,tenant,environment,team} > 90 message: | + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{cluster_name.name}}] High memory usage detected on {{host.name}} {{#is_warning}} {{cluster_name.name}} memory usage greater than 80% for 10 minutes {{/is_warning}} @@ -362,33 +362,34 @@ k8s-high-memory-usage: escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 90 - warning: 80 - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 90 + warning: 80 + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null k8s-high-filesystem-usage: name: "(k8s) ${tenant} ${ stage } - High Filesystem Usage Detected" type: metric alert query: | - avg(last_10m):avg:kubernetes.filesystem.usage_pct{stage:${ stage }} by {cluster_name} > 90 + avg(last_10m):avg:kubernetes.filesystem.usage_pct{stage:${ stage }} by {cluster_name,stage,tenant,environment,team} > 90 message: | + 
({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{cluster_name.name}}] {{#is_warning}} {{cluster_name.name}} filesystem usage greater than 80% for 10 minutes {{/is_warning}} @@ -398,33 +399,34 @@ k8s-high-filesystem-usage: escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 90 - warning: 80 - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 90 + warning: 80 + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null k8s-network-tx-errors: name: "(k8s) ${tenant} ${ stage } - High Network TX (send) Errors" type: metric alert query: | - avg(last_10m):avg:kubernetes.network.tx_errors{stage:${ stage }} by {cluster_name} > 100 + avg(last_10m):avg:kubernetes.network.tx_errors{stage:${ stage }} by {cluster_name,stage,tenant,environment,team} > 100 message: | + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{cluster_name.name}}] {{#is_warning}} {{cluster_name.name}} network TX (send) errors occurring 10 times per second {{/is_warning}} @@ -434,33 +436,34 @@ k8s-network-tx-errors: escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 100 - warning: 10 - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 100 + warning: 10 + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null k8s-network-rx-errors: name: "(k8s) ${tenant} ${ stage } - High Network RX (receive) Errors" type: metric alert query: | - avg(last_10m):avg:kubernetes.network.rx_errors{stage:${ stage }} by {cluster_name} > 100 + avg(last_10m):avg:kubernetes.network.rx_errors{stage:${ stage }} by {cluster_name,stage,tenant,environment,team} > 100 message: | + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{cluster_name.name}}] {{#is_warning}} {{cluster_name.name}} network RX (receive) errors occurring 10 times per second {{/is_warning}} @@ -470,86 +473,86 @@ k8s-network-rx-errors: escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 100 - warning: 10 - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: 
true + require_full_window: true + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 100 + warning: 10 + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null k8s-increased-pod-crash: name: "(k8s) ${tenant} ${ stage } - Increased Pod Crashes" type: query alert query: | - avg(last_5m):avg:kubernetes_state.container.restarts{stage:${ stage }} by {cluster_name,kube_namespace,pod} - hour_before(avg:kubernetes_state.container.restarts{stage:${ stage }} by {cluster_name,kube_namespace,pod}) > 3 + avg(last_5m):avg:kubernetes_state.container.restarts{stage:${ stage }} by {cluster_name,kube_namespace,pod,stage,tenant,environment,team} - hour_before(avg:kubernetes_state.container.restarts{stage:${ stage }} by {cluster_name,kube_namespace,pod,stage,tenant,environment,team}) > 3 message: |- - ({{cluster_name.name}} {{kube_namespace.name}} {{pod.name}}) has crashed repeatedly over the last hour + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{cluster_name.name}} {{kube_namespace.name}} {{pod.name}}] has crashed repeatedly over the last hour escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: false - require_full_window: false - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 3 - #warning: - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: false + require_full_window: false + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 3 + #warning: + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null k8s-pending-pods: name: "(k8s) ${tenant} ${ stage } - Pending Pods" type: metric alert query: | - min(last_30m):sum:kubernetes_state.pod.status_phase{stage:${ stage },phase:running} by {cluster_name} - sum:kubernetes_state.pod.status_phase{stage:${ stage },phase:running} by {cluster_name} + sum:kubernetes_state.pod.status_phase{stage:${ stage },phase:pending} by {cluster_name}.fill(zero) >= 1 + min(last_30m):sum:kubernetes_state.pod.status_phase{stage:${ stage },phase:running} by {cluster_name,stage,tenant,environment,team} - sum:kubernetes_state.pod.status_phase{stage:${ stage },phase:running} by {cluster_name,stage,tenant,environment,team} + sum:kubernetes_state.pod.status_phase{stage:${ stage },phase:pending} by {cluster_name,stage,tenant,environment,team}.fill(zero) >= 1 message: |- - ({{cluster_name.name}}) There has been at least 1 pod Pending for 30 minutes. + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) [{{cluster_name.name}}] There has been at least 1 pod Pending for 30 minutes. There are currently ({{value}}) pods Pending. 
escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: false - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 1 - #warning: - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: false + require_full_window: true + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 1 + #warning: + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null diff --git a/modules/datadog-monitor/catalog/monitors/lambda-log-forwarder.yaml b/modules/datadog-monitor/catalog/monitors/lambda-log-forwarder.yaml new file mode 100644 index 000000000..76238267e --- /dev/null +++ b/modules/datadog-monitor/catalog/monitors/lambda-log-forwarder.yaml @@ -0,0 +1,38 @@ +# The official Datadog API documentation with available query parameters & alert types: +# https://docs.datadoghq.com/api/v1/monitors/#create-a-monitor + +datadog-lambda-forwarder-config-modification: + name: "(Lambda) ${tenant} ${ stage } - - Datadog Lambda Forwarder Config Changed" + type: event-v2 alert + query: | + events("source:amazon_lambda functionname:${tenant}-${environment}-${ stage }-datadog-lambda-forwarder-logs").rollup("count").last("15m") >= 1 + message: | + Configuration has been changed for the datadog lambda forwarder in + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) + by {{ event.tags.username }}. + Event title: {{ event.title }} + Lambda function name: {{ event.tags.functionname }} + Event ID: {{ event.id }} + escalation_message: "" + tags: + managed-by: Terraform + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 60 + timeout_h: 1 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 1 + #warning: + #unknown: + #ok: + #critical_recovery: + #warning_recovery + priority: 3 + restricted_roles: null diff --git a/modules/datadog-monitor/catalog/monitors/lambda.yaml b/modules/datadog-monitor/catalog/monitors/lambda.yaml new file mode 100644 index 000000000..3346fe6eb --- /dev/null +++ b/modules/datadog-monitor/catalog/monitors/lambda.yaml @@ -0,0 +1,26 @@ +# The official Datadog API documentation with available query parameters & alert types: +# https://docs.datadoghq.com/api/v1/monitors/#create-a-monitor + +lambda-errors: + name: "(Lambda) ${tenant} ${ stage } - Lambda [{{functionname.name}}] has errors" + type: query alert + query: sum(last_5m):sum:aws.lambda.errors{*} by {stage,tenant,environment,functionname}.as_count() > 0 + message: | + Lambda {{functionname.name}} in + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) + has {{value}} errors over the last 5 minutes. 
+ tags: + managed-by: Terraform + options: + notify_audit: false + require_full_window: false + notify_no_data: false + renotify_interval: 0 + include_tags: true + evaluation_delay: 900 + new_group_delay: 60 + threshold_windows: { } + thresholds: + critical: 0 + priority: 3 + restricted_roles: null diff --git a/modules/datadog-monitor/catalog/monitors/rabbitmq.yaml b/modules/datadog-monitor/catalog/monitors/rabbitmq.yaml index 9bccec922..df8e106e9 100644 --- a/modules/datadog-monitor/catalog/monitors/rabbitmq.yaml +++ b/modules/datadog-monitor/catalog/monitors/rabbitmq.yaml @@ -2,8 +2,9 @@ rabbitmq-messages-unacknowledged-rate-too-high: name: "[RabbitMQ] ${tenant} ${ stage } - Messages unacknowledged rate is higher than usual on: {{broker.name}}" type: "query alert" query: | - avg(last_4h):anomalies(avg:aws.amazonmq.message_unacknowledged_count{stage:${ stage }} by {broker,queue}, 'agile', 2, direction='above', alert_window='last_15m', interval=60, count_default_zero='true', seasonality='hourly') >= 1 + avg(last_4h):anomalies(avg:aws.amazonmq.message_unacknowledged_count{stage:${ stage }} by {broker,queue,stage,tenant,environment,team}, 'agile', 2, direction='above', alert_window='last_15m', interval=60, count_default_zero='true', seasonality='hourly') >= 1 message: | + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) The rate at which messages are being delivered without receiving acknowledgement is higher than usual. There may be errors or performance issues downstream.\n Broker: {{broker.name}}\n @@ -12,33 +13,34 @@ rabbitmq-messages-unacknowledged-rate-too-high: tags: managed-by: Terraform integration: rabbitmq - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 0 - timeout_h: 0 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: null - threshold_windows: { } - thresholds: - critical: 1 - critical_recovery: 0 - #warning: - #unknown: - #ok: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 0 + timeout_h: 0 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: null + threshold_windows: { } + thresholds: + critical: 1 + critical_recovery: 0 + #warning: + #unknown: + #ok: + #warning_recovery: + priority: 3 + restricted_roles: null rabbitmq-memory-utilization: name: "[RabbitMQ] ${tenant} ${ stage } - Memory Utilization: {{broker.name}}" type: "query alert" query: | - avg(last_10m):avg:aws.amazonmq.rabbit_mqmem_used{stage:${ stage }} by {broker,node} / avg:aws.amazonmq.rabbit_mqmem_limit{stage:${ stage }} by {broker,node} > 0.50 + avg(last_10m):avg:aws.amazonmq.rabbit_mqmem_used{stage:${ stage }} by {broker,node,stage,tenant,environment,team} / avg:aws.amazonmq.rabbit_mqmem_limit{stage:${ stage }} by {broker,node,stage,tenant,environment,team} > 0.50 message: | + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) Memory Percentage of a node in Rabbit MQ Cluster is high Broker: {{broker.name}} Node: {{node.name}} @@ -46,32 +48,33 @@ rabbitmq-memory-utilization: tags: managed-by: Terraform integration: rabbitmq - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 0 - timeout_h: 0 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: null - threshold_windows: { } - thresholds: - critical: 0.50 - 
critical_recovery: 0.40 - #unknown: - #ok: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 0 + timeout_h: 0 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: null + threshold_windows: { } + thresholds: + critical: 0.50 + critical_recovery: 0.40 + #unknown: + #ok: + #warning_recovery: + priority: 3 + restricted_roles: null rabbitmq-disk-utilization: name: "[RabbitMQ] ${tenant} ${ stage } - Disk Utilization: {{broker.name}}" type: "query alert" query: | - avg(last_10m):avg:aws.amazonmq.rabbit_mqdisk_free{stage:${ stage }} by {broker,node} < 100000000000 + avg(last_10m):avg:aws.amazonmq.rabbit_mqdisk_free{stage:${ stage }} by {broker,node,stage,tenant,environment,team} < 100000000000 message: | + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) Free Disk Space of a node in Rabbit MQ Cluster is Less than 100 GB Broker: {{broker.name}} Node: {{node.name}} @@ -79,21 +82,21 @@ rabbitmq-disk-utilization: tags: managed-by: Terraform integration: rabbitmq - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 0 - timeout_h: 0 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: null - threshold_windows: { } - thresholds: - critical: 100000000000 - #unknown: - #ok: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 0 + timeout_h: 0 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: null + threshold_windows: { } + thresholds: + critical: 100000000000 + #unknown: + #ok: + #warning_recovery: + priority: 3 + restricted_roles: null diff --git a/modules/datadog-monitor/catalog/monitors/rds.yaml b/modules/datadog-monitor/catalog/monitors/rds.yaml index e92f8966f..946861130 100644 --- a/modules/datadog-monitor/catalog/monitors/rds.yaml +++ b/modules/datadog-monitor/catalog/monitors/rds.yaml @@ -5,8 +5,9 @@ rds-cpuutilization: name: "(RDS) ${tenant} ${ stage } - CPU Utilization above 90%" type: metric alert query: | - avg(last_15m):avg:aws.rds.cpuutilization{stage:${ stage }} by {dbinstanceidentifier} > 90 + avg(last_15m):avg:aws.rds.cpuutilization{stage:${ stage }} by {dbinstanceidentifier,stage,tenant,environment,team} > 90 message: | + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) {{#is_warning}} ({dbinstanceidentifier}) CPU Utilization above 85% {{/is_warning}} @@ -16,33 +17,34 @@ rds-cpuutilization: escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 90 - warning: 85 - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 90 + warning: 85 + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null rds-disk-queue-depth: name: "(RDS) ${tenant} ${ stage } - Disk queue depth above 64" type: metric alert query: | - 
avg(last_15m):avg:aws.rds.disk_queue_depth{stage:${ stage }} by {dbinstanceidentifier} > 64 + avg(last_15m):avg:aws.rds.disk_queue_depth{stage:${ stage }} by {dbinstanceidentifier,stage,tenant,environment,team} > 64 message: | + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) {{#is_warning}} ({dbinstanceidentifier}) Disk queue depth above 48 {{/is_warning}} @@ -52,26 +54,26 @@ rds-disk-queue-depth: escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 64 - warning: 48 - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 64 + warning: 48 + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null rds-freeable-memory: name: "(RDS) ${tenant} ${ stage } - Freeable memory below 256 MB" @@ -79,6 +81,7 @@ rds-freeable-memory: query: | avg(last_5m):avg:aws.rds.freeable_memory{stage:${ stage }} < 256000000 message: | + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) {{#is_warning}} ({dbinstanceidentifier}) Freeable memory below 512 MB {{/is_warning}} @@ -88,33 +91,34 @@ rds-freeable-memory: escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 256000000 - warning: 512000000 - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 256000000 + warning: 512000000 + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null rds-swap-usage: name: "(RDS) ${tenant} ${ stage } - Swap usage above 256 MB" type: metric alert query: | - avg(last_15m):avg:aws.rds.swap_usage{stage:${ stage }} by {dbinstanceidentifier} > 256000000 + avg(last_15m):avg:aws.rds.swap_usage{stage:${ stage }} by {dbinstanceidentifier,stage,tenant,environment,team} > 256000000 message: | + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) {{#is_warning}} ({dbinstanceidentifier}) Swap usage above 128 MB {{/is_warning}} @@ -124,33 +128,34 @@ rds-swap-usage: escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: { } - thresholds: - critical: 256000000 - warning: 128000000 - #unknown: - #ok: - #critical_recovery: - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: 
true + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: { } + thresholds: + critical: 256000000 + warning: 128000000 + #unknown: + #ok: + #critical_recovery: + #warning_recovery: + priority: 3 + restricted_roles: null rds-database-connections: name: "(RDS) ${tenant} ${ stage } - Anomaly of a large variance in RDS connection count" type: metric alert query: | - avg(last_4h):anomalies(avg:aws.rds.database_connections{stage:${ stage }}, 'basic', 2, direction='both', alert_window='last_15m', interval=60, count_default_zero='true') >= 1 + avg(last_4h):anomalies(avg:aws.rds.database_connections{stage:${ stage }} by {dbinstanceidentifier,stage,tenant,environment,team}, 'basic', 2, direction='both', interval=60, alert_window='last_15m', count_default_zero='true') >= 1 message: | + ({{tenant.name}}-{{environment.name}}-{{stage.name}}) {{#is_warning}} ({dbinstanceidentifier}) Anomaly of a large variance in RDS connection count {{/is_warning}} @@ -160,25 +165,25 @@ rds-database-connections: escalation_message: "" tags: managed-by: Terraform - notify_no_data: false - notify_audit: true - require_full_window: true - enable_logs_sample: false - force_delete: true - include_tags: true - locked: false - renotify_interval: 60 - timeout_h: 24 - evaluation_delay: 60 - new_host_delay: 300 - no_data_timeframe: 10 - threshold_windows: - trigger_window: "last_15m" - recovery_window: "last_15m" - thresholds: - critical: 1 - #warning: - #unknown: - #ok: - critical_recovery: 0 - #warning_recovery: + options: + notify_no_data: false + notify_audit: true + require_full_window: true + include_tags: true + renotify_interval: 60 + timeout_h: 24 + evaluation_delay: 60 + new_host_delay: 300 + no_data_timeframe: 10 + threshold_windows: + trigger_window: "last_15m" + recovery_window: "last_15m" + thresholds: + critical: 1 + #warning: + #unknown: + #ok: + critical_recovery: 0 + #warning_recovery: + priority: 3 + restricted_roles: null diff --git a/modules/datadog-monitor/default.auto.tfvars b/modules/datadog-monitor/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/datadog-monitor/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/datadog-monitor/main.tf b/modules/datadog-monitor/main.tf index 78a192940..45a267646 100644 --- a/modules/datadog-monitor/main.tf +++ b/modules/datadog-monitor/main.tf @@ -1,11 +1,5 @@ locals { - enabled = module.this.enabled - asm_enabled = var.secrets_store_type == "ASM" - ssm_enabled = var.secrets_store_type == "SSM" - - # https://docs.datadoghq.com/account_management/api-app-keys/ - datadog_api_key = local.asm_enabled ? data.aws_secretsmanager_secret_version.datadog_api_key[0].secret_string : data.aws_ssm_parameter.datadog_api_key[0].value - datadog_app_key = local.asm_enabled ? data.aws_secretsmanager_secret_version.datadog_app_key[0].secret_string : data.aws_ssm_parameter.datadog_app_key[0].value + enabled = module.this.enabled local_datadog_monitors_enabled = length(var.local_datadog_monitors_config_paths) > 0 remote_datadog_monitors_enabled = length(var.remote_datadog_monitors_config_paths) > 0 @@ -30,8 +24,8 @@ locals { } : {} context_dd_tags = { context_dd_tags = join(",", [ - for k, v in local.context_tags : - v != null ? format("%s:%s", k, v) : k + for k, v in local.context_tags : ( + v != null ? 
format("%s:%s", k, v) : k) ]) } @@ -47,13 +41,12 @@ locals { message = format("%s%s%s", var.message_prefix, lookup(v.merged, "message", ""), var.message_postfix) }) } - } # Convert all Datadog Monitors from YAML config to Terraform map with token replacement using `parameters` module "remote_datadog_monitors_yaml_config" { source = "cloudposse/config/yaml" - version = "1.0.1" + version = "1.0.2" map_config_remote_base_path = var.remote_datadog_monitors_base_path map_config_paths = var.remote_datadog_monitors_config_paths @@ -69,7 +62,7 @@ module "remote_datadog_monitors_yaml_config" { module "local_datadog_monitors_yaml_config" { source = "cloudposse/config/yaml" - version = "1.0.1" + version = "1.0.2" map_config_local_base_path = abspath(path.module) map_config_paths = var.local_datadog_monitors_config_paths @@ -85,13 +78,15 @@ module "local_datadog_monitors_yaml_config" { module "datadog_monitors_merge" { source = "cloudposse/config/yaml//modules/deepmerge" - version = "1.0.1" + version = "1.0.2" # for_each = { for k, v in local.datadog_monitors_yaml_config_map_configs : k => v if local.datadog_monitors_enabled } - for_each = { for k, v in merge( - module.local_datadog_monitors_yaml_config.map_configs, - module.remote_datadog_monitors_yaml_config.map_configs - ) : k => v if local.datadog_monitors_enabled } + for_each = { + for k, v in merge( + module.local_datadog_monitors_yaml_config.map_configs, + module.remote_datadog_monitors_yaml_config.map_configs + ) : k => v if local.datadog_monitors_enabled + } # Merge in order: datadog monitor, datadog monitor globals, context tags maps = [ @@ -105,7 +100,7 @@ module "datadog_monitors" { count = local.datadog_monitors_enabled ? 1 : 0 source = "cloudposse/platform/datadog//modules/monitors" - version = "1.0.0" + version = "1.4.1" datadog_monitors = local.datadog_monitors diff --git a/modules/datadog-monitor/provider-datadog.tf b/modules/datadog-monitor/provider-datadog.tf new file mode 100644 index 000000000..0b4e862f8 --- /dev/null +++ b/modules/datadog-monitor/provider-datadog.tf @@ -0,0 +1,12 @@ +module "datadog_configuration" { + source = "../datadog-configuration/modules/datadog_keys" + enabled = true + context = module.this.context +} + +provider "datadog" { + api_key = module.datadog_configuration.datadog_api_key + app_key = module.datadog_configuration.datadog_app_key + api_url = module.datadog_configuration.datadog_api_url + validate = local.enabled +} diff --git a/modules/datadog-monitor/providers.tf b/modules/datadog-monitor/providers.tf old mode 100755 new mode 100644 index f95d03446..ef923e10a --- a/modules/datadog-monitor/providers.tf +++ b/modules/datadog-monitor/providers.tf @@ -1,11 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -14,20 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} - -provider "datadog" { - api_key = local.datadog_api_key - app_key = local.datadog_app_key -} diff --git a/modules/datadog-monitor/ssm.tf b/modules/datadog-monitor/ssm.tf deleted file mode 100644 index 28a23dbae..000000000 --- a/modules/datadog-monitor/ssm.tf +++ /dev/null @@ -1,11 +0,0 @@ -data "aws_ssm_parameter" "datadog_api_key" { - count = local.ssm_enabled ? 1 : 0 - name = format("/%s", var.datadog_api_secret_key) - with_decryption = true -} - -data "aws_ssm_parameter" "datadog_app_key" { - count = local.ssm_enabled ? 1 : 0 - name = format("/%s", var.datadog_app_secret_key) - with_decryption = true -} diff --git a/modules/datadog-monitor/variables.tf b/modules/datadog-monitor/variables.tf index afbdad7cc..58153a3e2 100644 --- a/modules/datadog-monitor/variables.tf +++ b/modules/datadog-monitor/variables.tf @@ -39,42 +39,6 @@ variable "datadog_monitors_config_parameters" { default = {} } -variable "secrets_store_type" { - type = string - description = "Secret store type for Datadog API and app keys. Valid values: `SSM`, `ASM`" - default = "SSM" -} - -variable "datadog_api_secret_key" { - type = string - description = "The key of the Datadog API secret" - default = "datadog/datadog_api_key" -} - -variable "datadog_app_secret_key" { - type = string - description = "The key of the Datadog Application secret" - default = "datadog/datadog_app_key" -} - -variable "role_paths" { - type = list(string) - description = "List of paths to Datadog role configurations" - default = [] -} - -variable "monitors_roles_map" { - type = map(set(string)) - description = "Map of Datadog monitor names to a set of Datadog role names to restrict access to the monitors" - default = {} -} - -variable "datadog_secrets_source_store_account" { - type = string - description = "Account (stage) holding Secret Store for Datadog API and app keys." 
- default = "corp" -} - variable "datadog_monitor_globals" { type = any description = "Global parameters to add to each monitor" diff --git a/modules/datadog-monitor/versions.tf b/modules/datadog-monitor/versions.tf old mode 100755 new mode 100644 index 9b8e48942..20f566652 --- a/modules/datadog-monitor/versions.tf +++ b/modules/datadog-monitor/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.9.0" } datadog = { source = "datadog/datadog" diff --git a/modules/datadog-private-location-ecs/README.md b/modules/datadog-private-location-ecs/README.md new file mode 100644 index 000000000..4970e455f --- /dev/null +++ b/modules/datadog-private-location-ecs/README.md @@ -0,0 +1,150 @@ +--- +tags: + - component/datadog-private-location-ecs + - layer/datadog + - provider/aws + - provider/datadog +--- + +# Component: `datadog-private-location-ecs` + +This component is responsible for creating a datadog private location and deploying it to ECS (EC2 / Fargate) + +## Usage + +**Note** The app key required for this component requires admin level permissions if you are using the default roles. +Admin's have permissions to Write to private locations, which is needed for this component. + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. + +```yaml +# stacks/catalog/datadog/private-location.yaml +components: + terraform: + datadog-private-location: + metadata: + component: datadog-private-location-ecs + settings: + spacelift: + workspace_enabled: true + vars: + enabled: true + name: datadog-private-location + task: + task_memory: 512 + task_cpu: 256 + launch_type: FARGATE + # capacity_provider_strategies takes precedence over launch_type + capacity_provider_strategies: + - capacity_provider: FARGATE_SPOT + weight: 100 + base: null + network_mode: awsvpc + desired_count: 1 + ignore_changes_desired_count: true + ignore_changes_task_definition: false + use_alb_security_group: false + assign_public_ip: false + propagate_tags: SERVICE + wait_for_steady_state: true + circuit_breaker_deployment_enabled: true + circuit_breaker_rollback_enabled: true + containers: + datadog: + name: datadog-private-location + image: public.ecr.aws/datadog/synthetics-private-location-worker:latest + compatibilities: + - EC2 + - FARGATE + - FARGATE_SPOT + log_configuration: + logDriver: awslogs + options: {} + port_mappings: [] +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [datadog](#requirement\_datadog) | >= 3.3.0 | + +## Providers + +| Name | Version | +|------|---------| +| [datadog](#provider\_datadog) | >= 3.3.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [container\_definition](#module\_container\_definition) | cloudposse/ecs-container-definition/aws | 0.58.1 | +| [datadog\_configuration](#module\_datadog\_configuration) | ../datadog-configuration/modules/datadog_keys | n/a | +| [ecs\_alb\_service\_task](#module\_ecs\_alb\_service\_task) | cloudposse/ecs-alb-service-task/aws | 0.66.2 | +| [ecs\_cluster](#module\_ecs\_cluster) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [roles\_to\_principals](#module\_roles\_to\_principals) | ../account-map/modules/roles-to-principals | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | 
cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | + +## Resources + +| Name | Type | +|------|------| +| [datadog_synthetics_private_location.private_location](https://registry.terraform.io/providers/datadog/datadog/latest/docs/resources/synthetics_private_location) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [alb\_configuration](#input\_alb\_configuration) | The configuration to use for the ALB, specifying which cluster alb configuration to use | `string` | `"default"` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [containers](#input\_containers) | Feed inputs into container definition module | `any` | `{}` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [private\_location\_description](#input\_private\_location\_description) | The description of the private location. | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [task](#input\_task) | Feed inputs into ecs\_alb\_service\_task module | `any` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [ecs\_cluster\_arn](#output\_ecs\_cluster\_arn) | Selected ECS cluster ARN | +| [lb\_arn](#output\_lb\_arn) | Selected LB ARN | +| [lb\_listener\_https](#output\_lb\_listener\_https) | Selected LB HTTPS Listener | +| [lb\_sg\_id](#output\_lb\_sg\_id) | Selected LB SG ID | +| [subnet\_ids](#output\_subnet\_ids) | Selected subnet IDs | +| [vpc\_id](#output\_vpc\_id) | Selected VPC ID | +| [vpc\_sg\_id](#output\_vpc\_sg\_id) | Selected VPC SG ID | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/ecs-service) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/datadog-private-location-ecs/context.tf b/modules/datadog-private-location-ecs/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/datadog-private-location-ecs/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/datadog-private-location-ecs/main.tf b/modules/datadog-private-location-ecs/main.tf new file mode 100644 index 000000000..9bb93edcc --- /dev/null +++ b/modules/datadog-private-location-ecs/main.tf @@ -0,0 +1,120 @@ +locals { + enabled = module.this.enabled + + container_definition = concat([ + for container in module.container_definition : + container.json_map_object + ], + ) + datadog_location_config = try(jsondecode(datadog_synthetics_private_location.private_location[0].config), null) + +} + +module "roles_to_principals" { + source = "../account-map/modules/roles-to-principals" + context = module.this.context + role_map = {} +} + +resource "datadog_synthetics_private_location" "private_location" { + count = local.enabled ? 
1 : 0 + + name = module.this.id + description = coalesce(var.private_location_description, format("Private location for %s", module.this.id)) + tags = module.datadog_configuration.datadog_tags +} + +module "container_definition" { + source = "cloudposse/ecs-container-definition/aws" + version = "0.58.1" + + depends_on = [datadog_synthetics_private_location.private_location] + + for_each = { for k, v in var.containers : k => v if local.enabled } + + container_name = lookup(each.value, "name") + + container_image = lookup(each.value, "image") + + container_memory = lookup(each.value, "memory", null) + container_memory_reservation = lookup(each.value, "memory_reservation", null) + container_cpu = lookup(each.value, "cpu", null) + essential = lookup(each.value, "essential", true) + readonly_root_filesystem = lookup(each.value, "readonly_root_filesystem", null) + + map_environment = merge( + lookup(each.value, "map_environment", {}), + { "APP_ENV" = format("%s-%s-%s-%s", var.namespace, var.tenant, var.environment, var.stage) }, + { "RUNTIME_ENV" = format("%s-%s-%s", var.namespace, var.tenant, var.stage) }, + { "CLUSTER_NAME" = module.ecs_cluster.outputs.cluster_name }, + { "DATADOG_SITE" = module.datadog_configuration.datadog_site }, + { "DATADOG_API_KEY" = module.datadog_configuration.datadog_api_key }, + { "DATADOG_ACCESS_KEY" = local.datadog_location_config.accessKey }, + { "DATADOG_SECRET_ACCESS_KEY" = local.datadog_location_config.secretAccessKey }, + { "DATADOG_PUBLIC_KEY_PEM" = local.datadog_location_config.publicKey.pem }, + { "DATADOG_PUBLIC_KEY_FINGERPRINT" = local.datadog_location_config.publicKey.fingerprint }, + { "DATADOG_PRIVATE_KEY" = local.datadog_location_config.privateKey }, + { "DATADOG_LOCATION_ID" = local.datadog_location_config.id }, + ) + + map_secrets = lookup(each.value, "map_secrets", null) != null ? zipmap( + keys(lookup(each.value, "map_secrets", null)), + formatlist("%s/%s", format("arn:aws:ssm:%s:%s:parameter", + var.region, module.roles_to_principals.full_account_map[format("%s-%s", var.tenant, var.stage)]), + values(lookup(each.value, "map_secrets", null))) + ) : null + port_mappings = lookup(each.value, "port_mappings", []) + command = lookup(each.value, "command", null) + entrypoint = lookup(each.value, "entrypoint", null) + healthcheck = lookup(each.value, "healthcheck", null) + ulimits = lookup(each.value, "ulimits", null) + volumes_from = lookup(each.value, "volumes_from", null) + docker_labels = lookup(each.value, "docker_labels", null) + + firelens_configuration = lookup(each.value, "firelens_configuration", null) + + # escape hatch for anything not specifically described above or unsupported by the upstream module + container_definition = lookup(each.value, "container_definition", {}) +} + +module "ecs_alb_service_task" { + source = "cloudposse/ecs-alb-service-task/aws" + version = "0.66.2" + + count = var.enabled ? 
1 : 0 + + ecs_cluster_arn = local.ecs_cluster_arn + vpc_id = local.vpc_id + subnet_ids = local.subnet_ids + + container_definition_json = jsonencode(local.container_definition) + + # This is set to true to allow ingress from the ALB sg + use_alb_security_group = lookup(var.task, "use_alb_security_group", true) + alb_security_group = local.lb_sg_id + security_group_ids = [local.vpc_sg_id] + + # See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecs_service#load_balancer + ecs_load_balancers = [] + + assign_public_ip = false + ignore_changes_task_definition = lookup(var.task, "ignore_changes_task_definition", false) + ignore_changes_desired_count = lookup(var.task, "ignore_changes_desired_count", true) + launch_type = lookup(var.task, "launch_type", "FARGATE") + network_mode = lookup(var.task, "network_mode", "awsvpc") + propagate_tags = lookup(var.task, "propagate_tags", "SERVICE") + deployment_minimum_healthy_percent = lookup(var.task, "deployment_minimum_healthy_percent", null) + deployment_maximum_percent = lookup(var.task, "deployment_maximum_percent", null) + deployment_controller_type = lookup(var.task, "deployment_controller_type", null) + desired_count = lookup(var.task, "desired_count", 0) + task_memory = lookup(var.task, "task_memory", null) + task_cpu = lookup(var.task, "task_cpu", null) + wait_for_steady_state = lookup(var.task, "wait_for_steady_state", true) + circuit_breaker_deployment_enabled = lookup(var.task, "circuit_breaker_deployment_enabled", true) + circuit_breaker_rollback_enabled = lookup(var.task, "circuit_breaker_rollback_enabled ", true) + task_policy_arns = [] + ecs_service_enabled = lookup(var.task, "ecs_service_enabled", true) + capacity_provider_strategies = lookup(var.task, "capacity_provider_strategies", []) + + context = module.this.context +} diff --git a/modules/datadog-private-location-ecs/outputs.tf b/modules/datadog-private-location-ecs/outputs.tf new file mode 100644 index 000000000..f6e25b29a --- /dev/null +++ b/modules/datadog-private-location-ecs/outputs.tf @@ -0,0 +1,34 @@ +output "ecs_cluster_arn" { + value = local.ecs_cluster_arn + description = "Selected ECS cluster ARN" +} + +output "subnet_ids" { + value = local.subnet_ids + description = "Selected subnet IDs" +} + +output "vpc_id" { + value = local.vpc_id + description = "Selected VPC ID" +} + +output "vpc_sg_id" { + value = local.vpc_sg_id + description = "Selected VPC SG ID" +} + +output "lb_sg_id" { + value = local.lb_sg_id + description = "Selected LB SG ID" +} + +output "lb_arn" { + value = local.lb_arn + description = "Selected LB ARN" +} + +output "lb_listener_https" { + value = local.lb_listener_https_arn + description = "Selected LB HTTPS Listener" +} diff --git a/modules/datadog-private-location-ecs/provider-datadog.tf b/modules/datadog-private-location-ecs/provider-datadog.tf new file mode 100644 index 000000000..0b4e862f8 --- /dev/null +++ b/modules/datadog-private-location-ecs/provider-datadog.tf @@ -0,0 +1,12 @@ +module "datadog_configuration" { + source = "../datadog-configuration/modules/datadog_keys" + enabled = true + context = module.this.context +} + +provider "datadog" { + api_key = module.datadog_configuration.datadog_api_key + app_key = module.datadog_configuration.datadog_app_key + api_url = module.datadog_configuration.datadog_api_url + validate = local.enabled +} diff --git a/modules/datadog-private-location-ecs/providers.tf b/modules/datadog-private-location-ecs/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null 
+++ b/modules/datadog-private-location-ecs/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/datadog-private-location-ecs/remote-state.tf b/modules/datadog-private-location-ecs/remote-state.tf new file mode 100644 index 000000000..2a3b6161b --- /dev/null +++ b/modules/datadog-private-location-ecs/remote-state.tf @@ -0,0 +1,28 @@ +locals { + vpc_id = module.vpc.outputs.vpc_id + vpc_sg_id = module.vpc.outputs.vpc_default_security_group_id + subnet_ids = lookup(module.vpc.outputs.subnets, "private", { ids = [] }).ids + ecs_cluster_arn = module.ecs_cluster.outputs.cluster_arn + + lb_arn = try(module.ecs_cluster.outputs.alb[var.alb_configuration].alb_arn, null) + lb_listener_https_arn = try(module.ecs_cluster.outputs.alb[var.alb_configuration].https_listener_arn, null) + lb_sg_id = try(module.ecs_cluster.outputs.alb[var.alb_configuration].security_group_id, null) +} + +module "vpc" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "vpc" + + context = module.this.context +} + +module "ecs_cluster" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "ecs/cluster" + + context = module.this.context +} diff --git a/modules/datadog-private-location-ecs/variables.tf b/modules/datadog-private-location-ecs/variables.tf new file mode 100644 index 000000000..9464172e6 --- /dev/null +++ b/modules/datadog-private-location-ecs/variables.tf @@ -0,0 +1,28 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "private_location_description" { + type = string + description = "The description of the private location." 
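+  # When left null, the component falls back to "Private location for <module.this.id>" via the coalesce() in main.tf.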
+ default = null +} + +variable "containers" { + type = any + description = "Feed inputs into container definition module" + default = {} +} + +variable "task" { + type = any + description = "Feed inputs into ecs_alb_service_task module" + default = {} +} + +variable "alb_configuration" { + type = string + description = "The configuration to use for the ALB, specifying which cluster alb configuration to use" + default = "default" +} diff --git a/modules/datadog-private-location-ecs/versions.tf b/modules/datadog-private-location-ecs/versions.tf new file mode 100644 index 000000000..f636a1364 --- /dev/null +++ b/modules/datadog-private-location-ecs/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + datadog = { + source = "datadog/datadog" + version = ">= 3.3.0" + } + } +} diff --git a/modules/datadog-synthetics-private-location/CHANGELOG.md b/modules/datadog-synthetics-private-location/CHANGELOG.md new file mode 100644 index 000000000..ee538026d --- /dev/null +++ b/modules/datadog-synthetics-private-location/CHANGELOG.md @@ -0,0 +1,13 @@ +## PR [#814](https://github.com/cloudposse/terraform-aws-components/pull/814) + +### Possible Breaking Change + +Previously this component directly created the Kubernetes namespace for the agent when `create_namespace` was set to +`true`. Now this component delegates that responsibility to the `helm-release` module, which better coordinates the +destruction of resources at destruction time (for example, ensuring that the Helm release is completely destroyed and +finalizers run before deleting the namespace). + +Generally the simplest upgrade path is to destroy the Helm release, then destroy the namespace, then apply the new +configuration. Alternatively, you can use `terraform state mv` to move the existing namespace to the new Terraform +"address", which will preserve the existing deployment and reduce the possibility of the destroy failing and leaving the +Kubernetes cluster in a bad state. diff --git a/modules/datadog-synthetics-private-location/README.md b/modules/datadog-synthetics-private-location/README.md new file mode 100644 index 000000000..0d78ced6f --- /dev/null +++ b/modules/datadog-synthetics-private-location/README.md @@ -0,0 +1,234 @@ +--- +tags: + - component/datadog-synthetics-private-location + - layer/datadog + - provider/aws + - provider/datadog +--- + +# Component: `datadog-synthetics-private-location` + +This component provisions a Datadog synthetics private location on Datadog and a private location agent on EKS cluster. + +Private locations allow you to monitor internal-facing applications or any private URLs that are not accessible from the +public internet. + +## Usage + +**Stack Level**: Regional + +Use this in the catalog or use these variables to overwrite the catalog values. 
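+For example, a regional stack can import a catalog entry containing the configuration shown below and override only
+the values that differ per environment. A minimal sketch (the stack file path and catalog import path are
+illustrative and depend on your stack layout):
+
+```yaml
+# stacks/orgs/acme/plat/use1/dev/monitoring.yaml (hypothetical stack file)
+import:
+  - catalog/datadog/synthetics-private-location
+
+components:
+  terraform:
+    datadog-synthetics-private-location:
+      vars:
+        kubernetes_namespace: "monitoring"
+        chart_version: "0.15.15"
+```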
+ +```yaml +components: + terraform: + datadog-synthetics-private-location: + settings: + spacelift: + workspace_enabled: true + vars: + enabled: true + name: "datadog-synthetics-private-location" + description: "Datadog Synthetics Private Location Agent" + kubernetes_namespace: "monitoring" + create_namespace: true + # https://github.com/DataDog/helm-charts/tree/main/charts/synthetics-private-location + repository: "https://helm.datadoghq.com" + chart: "synthetics-private-location" + chart_version: "0.15.15" + timeout: 180 + wait: true + atomic: true + cleanup_on_fail: true +``` + +## Synthetics Private Location Config + +```shell +docker run --rm datadog/synthetics-private-location-worker --help +``` + +``` +The Datadog Synthetics Private Location Worker runs tests on privately accessible websites and brings results to Datadog + +Access keys: + --accessKey Access Key for Datadog API authentication [string] + --secretAccessKey Secret Access Key for Datadog API authentication [string] + --datadogApiKey Datadog API key to send browser tests artifacts (e.g. screenshots) [string] + --privateKey Private Key used to decrypt test configurations [array] + --publicKey Public Key used by Datadog to encrypt test results. Composed of --publicKey.pem and --publicKey.fingerprint + +Worker configuration: + --site Datadog site (datadoghq.com, us3.datadoghq.com, datadoghq.eu or ddog-gov.com) [string] [required] [default: "datadoghq.com"] + --concurrency Maximum number of tests executed in parallel [number] [default: 10] + --maxNumberMessagesToFetch Maximum number of tests that can be fetched at the same time [number] [default: 10] + --proxyDatadog Proxy URL used to send requests to Datadog [string] [default: none] + --dumpConfig Display non-secret worker configuration parameters [boolean] + --enableStatusProbes Enable the probes system for Kubernetes [boolean] [default: false] + --statusProbesPort The port for the probes server to listen on [number] [default: 8080] + --config Path to JSON config file [default: "/etc/datadog/synthetics-check-runner.json"] + +Tests configuration: + --maxTimeout Maximum test execution duration, in milliseconds [number] [default: 60000] + --proxyTestRequests Proxy URL used to send test requests [string] [default: none] + --proxyIgnoreSSLErrors Discard SSL errors when using a proxy [boolean] [default: false] + --dnsUseHost Use local DNS config for API tests and HTTP steps in browser tests (currently ["192.168.65.5"]) [boolean] [default: true] + --dnsServer DNS server IPs used in given order for API tests and HTTP steps in browser tests (--dnsServer="1.0.0.1" --dnsServer="9.9.9.9") and after local DNS config, if --dnsUseHost is present [array] [default: ["8.8.8.8","1.1.1.1"]] + +Network filtering: + --allowedIPRanges Grant access to IP ranges (has precedence over --blockedIPRanges) [default: none] + --blockedIPRanges Deny access to IP ranges (e.g. --blockedIPRanges.4="127.0.0.0/8" --blockedIPRanges.6="::1/128") [default: none] + --enableDefaultBlockedIpRanges Deny access to all reserved IP ranges, except for those explicitly set in --allowedIPRanges [boolean] [default: false] + --allowedDomainNames Grant access to domain names for API tests (has precedence over --blockedDomainNames, e.g. --allowedDomainNames="*.example.com") [array] [default: none] + --blockedDomainNames Deny access to domain names for API tests (e.g. --blockedDomainNames="example.org" --blockedDomainNames="*.com") [array] [default: none] + +Options: + --enableIPv6 Use IPv6 to perform tests. 
(Warning: IPv6 in Docker is only supported with Linux host) [boolean] [default: false] + --version Show version number [boolean] + -f, --logFormat Format log output [choices: "pretty", "pretty-compact", "json"] [default: "pretty"] + -h, --help Show help [boolean] + +Volumes: + /etc/datadog/certs/ .pem certificates present in this directory will be imported and trusted as certificate authorities for API and browser tests + +Environment variables: + Command options can also be set via environment variables (DATADOG_API_KEY="...", DATADOG_WORKER_CONCURRENCY="15", DATADOG_DNS_USE_HOST="true") + For options that accept multiple arguments, JSON string array notation should be used (DATADOG_TESTS_DNS_SERVER='["8.8.8.8", "1.1.1.1"]') + + Supported environment variables: + DATADOG_ACCESS_KEY, + DATADOG_API_KEY, + DATADOG_PRIVATE_KEY, + DATADOG_PUBLIC_KEY_FINGERPRINT, + DATADOG_PUBLIC_KEY_PEM, + DATADOG_SECRET_ACCESS_KEY, + DATADOG_SITE, + DATADOG_WORKER_CONCURRENCY, + DATADOG_WORKER_LOG_FORMAT, + DATADOG_WORKER_MAX_NUMBER_MESSAGES_TO_FETCH, + DATADOG_WORKER_PROXY, + DATADOG_TESTS_DNS_SERVER, + DATADOG_TESTS_DNS_USE_HOST, + DATADOG_TESTS_PROXY, + DATADOG_TESTS_PROXY_IGNORE_SSL_ERRORS, + DATADOG_TESTS_TIMEOUT, + DATADOG_ALLOWED_IP_RANGES_4, + DATADOG_ALLOWED_IP_RANGES_6, + DATADOG_BLOCKED_IP_RANGES_4, + DATADOG_BLOCKED_IP_RANGES_6, + DATADOG_ENABLE_DEFAULT_WINDOWS_FIREWALL_RULES, + DATADOG_ALLOWED_DOMAIN_NAMES, + DATADOG_BLOCKED_DOMAIN_NAMES, + DATADOG_WORKER_ENABLE_STATUS_PROBES, + DATADOG_WORKER_STATUS_PROBES_PORT +``` + +## References + +- https://docs.datadoghq.com/synthetics/private_locations +- https://docs.datadoghq.com/synthetics/private_locations/configuration/ +- https://github.com/DataDog/helm-charts/tree/main/charts/synthetics-private-location +- https://github.com/DataDog/helm-charts/blob/main/charts/synthetics-private-location/values.yaml + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [datadog](#requirement\_datadog) | >= 3.3.0 | +| [helm](#requirement\_helm) | >= 2.3.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.14.0, != 2.21.0 | +| [local](#requirement\_local) | >= 1.3 | +| [template](#requirement\_template) | >= 2.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | +| [datadog](#provider\_datadog) | >= 3.3.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [datadog\_configuration](#module\_datadog\_configuration) | ../datadog-configuration/modules/datadog_keys | n/a | +| [datadog\_synthetics\_private\_location](#module\_datadog\_synthetics\_private\_location) | cloudposse/helm-release/aws | 0.10.1 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [datadog_synthetics_private_location.this](https://registry.terraform.io/providers/datadog/datadog/latest/docs/resources/synthetics_private_location) | resource | +| [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each 
map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [atomic](#input\_atomic) | If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used | `bool` | `true` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [chart](#input\_chart) | Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended | `string` | n/a | yes | +| [chart\_version](#input\_chart\_version) | Specify the exact chart version to install. If this is not specified, the latest version is installed | `string` | `null` | no | +| [cleanup\_on\_fail](#input\_cleanup\_on\_fail) | Allow deletion of new resources created in this upgrade when upgrade fails | `bool` | `true` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [create\_namespace](#input\_create\_namespace) | Create the Kubernetes namespace if it does not yet exist | `bool` | `true` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [description](#input\_description) | Release description attribute (visible in the history) | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | +| [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | +| [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | +| [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | +| [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | +| [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | +| [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | +| [kubernetes\_namespace](#input\_kubernetes\_namespace) | Kubernetes namespace to install the release into | `string` | n/a | yes | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [private\_location\_tags](#input\_private\_location\_tags) | List of static tags to associate with the synthetics private location | `set(string)` | `[]` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [repository](#input\_repository) | Repository URL where to locate the requested chart | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [timeout](#input\_timeout) | Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds | `number` | `null` | no | +| [verify](#input\_verify) | Verify the package before installing it. Helm uses a provenance file to verify the integrity of the chart; this must be hosted alongside the chart | `bool` | `false` | no | +| [wait](#input\_wait) | Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true` | `bool` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [metadata](#output\_metadata) | Block status of the deployed release | +| [synthetics\_private\_location\_id](#output\_synthetics\_private\_location\_id) | Synthetics private location ID | + + + +## References + +- https://docs.datadoghq.com/getting_started/synthetics/private_location +- https://docs.datadoghq.com/synthetics/private_locations/configuration +- https://registry.terraform.io/providers/DataDog/datadog/latest/docs/resources/synthetics_private_location +- https://github.com/DataDog/helm-charts/tree/main/charts/synthetics-private-location diff --git a/modules/datadog-synthetics-private-location/context.tf b/modules/datadog-synthetics-private-location/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/datadog-synthetics-private-location/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/datadog-agent/helm-variables.tf b/modules/datadog-synthetics-private-location/helm-variables.tf similarity index 100% rename from modules/datadog-agent/helm-variables.tf rename to modules/datadog-synthetics-private-location/helm-variables.tf diff --git a/modules/datadog-synthetics-private-location/main.tf b/modules/datadog-synthetics-private-location/main.tf new file mode 100644 index 000000000..107cfc8eb --- /dev/null +++ b/modules/datadog-synthetics-private-location/main.tf @@ -0,0 +1,62 @@ +locals { + enabled = module.this.enabled + + # https://docs.datadoghq.com/synthetics/private_locations/configuration + # docker run --rm datadog/synthetics-private-location-worker --help + private_location_config = jsondecode(join("", datadog_synthetics_private_location.this.*.config)) +} + +resource "datadog_synthetics_private_location" "this" { + count = local.enabled ? 
1 : 0 + name = module.this.id + description = module.this.id + tags = var.private_location_tags +} + + +module "datadog_synthetics_private_location" { + source = "cloudposse/helm-release/aws" + version = "0.10.1" + + name = module.this.name + chart = var.chart + description = var.description + repository = var.repository + chart_version = var.chart_version + + kubernetes_namespace = var.kubernetes_namespace + + # Usually set to `false` if deploying eks/datadog-agent, since namespace will already be created + create_namespace_with_kubernetes = var.create_namespace + + verify = var.verify + wait = var.wait + atomic = var.atomic + cleanup_on_fail = var.cleanup_on_fail + timeout = var.timeout + + eks_cluster_oidc_issuer_url = module.eks.outputs.eks_cluster_identity_oidc_issuer + + service_account_name = module.this.name + service_account_namespace = var.kubernetes_namespace + + iam_role_enabled = false + + values = [ + templatefile( + "${path.module}/values.yaml.tpl", + { + id = local.private_location_config.id, + datadogApiKey = module.datadog_configuration.datadog_api_key, + accessKey = local.private_location_config.accessKey, + secretAccessKey = local.private_location_config.secretAccessKey, + privateKey = replace(local.private_location_config.privateKey, "\n", "\n "), + publicKey_pem = replace(local.private_location_config.publicKey.pem, "\n", "\n "), + publicKey_fingerprint = local.private_location_config.publicKey.fingerprint, + site = local.private_location_config.site + } + ) + ] + + context = module.this.context +} diff --git a/modules/datadog-synthetics-private-location/outputs.tf b/modules/datadog-synthetics-private-location/outputs.tf new file mode 100644 index 000000000..1958fd6de --- /dev/null +++ b/modules/datadog-synthetics-private-location/outputs.tf @@ -0,0 +1,9 @@ +output "synthetics_private_location_id" { + value = one(datadog_synthetics_private_location.this[*].id) + description = "Synthetics private location ID" +} + +output "metadata" { + value = local.enabled ? module.datadog_synthetics_private_location.metadata : null + description = "Block status of the deployed release" +} diff --git a/modules/datadog-synthetics-private-location/provider-datadog.tf b/modules/datadog-synthetics-private-location/provider-datadog.tf new file mode 100644 index 000000000..0b4e862f8 --- /dev/null +++ b/modules/datadog-synthetics-private-location/provider-datadog.tf @@ -0,0 +1,12 @@ +module "datadog_configuration" { + source = "../datadog-configuration/modules/datadog_keys" + enabled = true + context = module.this.context +} + +provider "datadog" { + api_key = module.datadog_configuration.datadog_api_key + app_key = module.datadog_configuration.datadog_app_key + api_url = module.datadog_configuration.datadog_api_url + validate = local.enabled +} diff --git a/modules/datadog-synthetics-private-location/provider-helm.tf b/modules/datadog-synthetics-private-location/provider-helm.tf new file mode 100644 index 000000000..91cc7f6d4 --- /dev/null +++ b/modules/datadog-synthetics-private-location/provider-helm.tf @@ -0,0 +1,201 @@ +################## +# +# This file is a drop-in to provide a helm provider. +# +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# +# All the following variables are just about configuring the Kubernetes provider +# to be able to modify EKS cluster. 
The reason there are so many options is +# because at various times, each one of them has had problems, so we give you a choice. +# +# The reason there are so many "enabled" inputs rather than automatically +# detecting whether or not they are enabled based on the value of the input +# is that any logic based on input values requires the values to be known during +# the "plan" phase of Terraform, and often they are not, which causes problems. +# +variable "kubeconfig_file_enabled" { + type = bool + default = false + description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false +} + +variable "kubeconfig_file" { + type = string + default = "" + description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false +} + +variable "kubeconfig_context" { + type = string + default = "" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false +} + +variable "kube_data_auth_enabled" { + type = bool + default = false + description = <<-EOT + If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. + EOT + nullable = false +} + +variable "kube_exec_auth_enabled" { + type = bool + default = true + description = <<-EOT + If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. + EOT + nullable = false +} + +variable "kube_exec_auth_role_arn" { + type = string + default = "" + description = "The role ARN for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_role_arn_enabled" { + type = bool + default = true + description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false +} + +variable "kube_exec_auth_aws_profile" { + type = string + default = "" + description = "The AWS config profile for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_aws_profile_enabled" { + type = bool + default = false + description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false +} + +variable "kubeconfig_exec_auth_api_version" { + type = string + default = "client.authentication.k8s.io/v1beta1" + description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false +} + +variable "helm_manifest_experiment_enabled" { + type = bool + default = false + description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false +} + +locals { + kubeconfig_file_enabled = var.kubeconfig_file_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? 
var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + + # Eventually we might try to get this from an environment variable + kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version + + exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? [ + "--profile", var.kube_exec_auth_aws_profile + ] : [] + + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) + exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ + "--role-arn", local.kube_exec_auth_role_arn + ] : [] + + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") +} + +data "aws_eks_cluster_auth" "eks" { + count = local.kube_data_auth_enabled ? 1 : 0 + name = local.eks_cluster_id +} + +provider "helm" { + kubernetes { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } + } + experiments { + manifest = var.helm_manifest_experiment_enabled && module.this.enabled + } +} + +provider "kubernetes" { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? 
["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } +} diff --git a/modules/datadog-synthetics-private-location/providers.tf b/modules/datadog-synthetics-private-location/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/datadog-synthetics-private-location/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/datadog-synthetics-private-location/remote-state.tf b/modules/datadog-synthetics-private-location/remote-state.tf new file mode 100644 index 000000000..c1ec8226d --- /dev/null +++ b/modules/datadog-synthetics-private-location/remote-state.tf @@ -0,0 +1,8 @@ +module "eks" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.eks_component_name + + context = module.this.context +} diff --git a/modules/datadog-synthetics-private-location/values.yaml.tpl b/modules/datadog-synthetics-private-location/values.yaml.tpl new file mode 100644 index 000000000..d2b4c23e2 --- /dev/null +++ b/modules/datadog-synthetics-private-location/values.yaml.tpl @@ -0,0 +1,34 @@ +replicaCount: 1 + +podAnnotations: { } + +serviceAccount: + create: true + name: "datadog-synthetics-private-location" + +resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + +configFile: |- + { + "id": "${id}", + "datadogApiKey": "${datadogApiKey}", + "accessKey": "${accessKey}", + "secretAccessKey": "${secretAccessKey}", + "site": "${site}" + } + +env: + - name: DATADOG_PRIVATE_KEY + value: |- + ${privateKey} + - name: DATADOG_PUBLIC_KEY_PEM + value: |- + ${publicKey_pem} + - name: DATADOG_PUBLIC_KEY_FINGERPRINT + value: ${publicKey_fingerprint} diff --git a/modules/datadog-synthetics-private-location/variables.tf b/modules/datadog-synthetics-private-location/variables.tf new file mode 100644 index 000000000..ac1088260 --- /dev/null +++ b/modules/datadog-synthetics-private-location/variables.tf @@ -0,0 +1,16 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "private_location_tags" { + type = set(string) + description = "List of static tags to associate with the synthetics private location" + default = [] +} + +variable "eks_component_name" { + type = string + description = "The name of the eks component" + default = "eks/cluster" +} diff --git a/modules/datadog-synthetics-private-location/versions.tf b/modules/datadog-synthetics-private-location/versions.tf new file mode 100644 index 000000000..fe99a3345 --- /dev/null +++ b/modules/datadog-synthetics-private-location/versions.tf @@ -0,0 +1,30 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + template = { + source = "hashicorp/template" + version = ">= 2.0" + } + local = { + source = "hashicorp/local" + version = ">= 1.3" + } + 
helm = {
+      source  = "hashicorp/helm"
+      version = ">= 2.3.0"
+    }
+    datadog = {
+      source  = "datadog/datadog"
+      version = ">= 3.3.0"
+    }
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = ">= 2.14.0, != 2.21.0"
+    }
+  }
+}
diff --git a/modules/datadog-synthetics/CHANGELOG.md b/modules/datadog-synthetics/CHANGELOG.md
new file mode 100644
index 000000000..f9ccb06db
--- /dev/null
+++ b/modules/datadog-synthetics/CHANGELOG.md
@@ -0,0 +1,19 @@
+## Changes (approximately v1.329.0)
+
+### API Schema accepted
+
+Tests can now be defined using the Datadog API schema, meaning that the test definition returned by
+
+- `https://api.datadoghq.com/api/v1/synthetics/tests/api/{public_id}`
+- `https://api.datadoghq.com/api/v1/synthetics/tests/browser/{public_id}`
+
+can be directly used as a map value (you still need to supply a key, though).
+
+You can mix tests using the API schema with tests using the old Terraform schema. You could probably get away with
+mixing them in the same test, but it is not recommended.
+
+### Default locations
+
+Previously, the default locations for Synthetics tests were "all" public locations. Now the default is no locations, in
+favor of locations being specified in each test configuration, which is more flexible. Also, since the tests are
+expensive, it is better to err on the side of too few test locations than too many.
diff --git a/modules/datadog-synthetics/README.md b/modules/datadog-synthetics/README.md
new file mode 100644
index 000000000..aba1801e7
--- /dev/null
+++ b/modules/datadog-synthetics/README.md
@@ -0,0 +1,248 @@
+---
+tags:
+  - component/datadog-synthetics
+  - layer/datadog
+  - provider/aws
+  - provider/datadog
+---
+
+# Component: `datadog-synthetics`
+
+This component provides the ability to implement
+[Datadog synthetic tests](https://docs.datadoghq.com/synthetics/guide/).
+
+Synthetic tests allow you to observe how your systems and applications are performing using simulated requests and
+actions from the AWS managed locations around the globe, and to monitor internal endpoints from
+[Private Locations](https://docs.datadoghq.com/synthetics/private_locations).
+
+## Usage
+
+**Stack Level**: Regional
+
+Here's an example snippet for how to use this component:
+
+### Stack Configuration
+
+```yaml
+components:
+  terraform:
+    datadog-synthetics:
+      metadata:
+        component: "datadog-synthetics"
+      settings:
+        spacelift:
+          workspace_enabled: true
+      vars:
+        enabled: true
+        name: "datadog-synthetics"
+        locations:
+          - "all"
+        # List of paths to Datadog synthetic test configurations
+        synthetics_paths:
+          - "catalog/synthetics/examples/*.yaml"
+        synthetics_private_location_component_name: "datadog-synthetics-private-location"
+        private_location_test_enabled: true
+```
+
+### Synthetics Configuration Examples
+
+Below are examples of Datadog browser and API synthetic tests.
+
+The synthetic tests are defined in YAML using either the
+[Datadog Terraform provider](https://registry.terraform.io/providers/DataDog/datadog/latest/docs/resources/synthetics_test)
+schema or the [Datadog Synthetics API](https://docs.datadoghq.com/api/latest/synthetics) schema. See the
+`terraform-datadog-platform` Terraform module
+[README](https://github.com/cloudposse/terraform-datadog-platform/blob/main/modules/synthetics/README.md) for more
+details. We recommend using the API schema so you can more easily create and edit tests using the Datadog web UI and then
+import them into this module by downloading the test using the Datadog REST API. 
(See the Datadog API documentation for +the appropriate `curl` commands to use.) + +```yaml +# API schema +my-browser-test: + name: My Browser Test + status: live + type: browser + config: + request: + method: GET + headers: {} + url: https://example.com/login + setCookie: |- + DatadogTest=true + message: "My Browser Test Failed" + options: + device_ids: + - chrome.laptop_large + - edge.tablet + - firefox.mobile_small + ignoreServerCertificateError: false + disableCors: false + disableCsp: false + noScreenshot: false + tick_every: 86400 + min_failure_duration: 0 + min_location_failed: 1 + retry: + count: 0 + interval: 300 + monitor_options: + renotify_interval: 0 + ci: + executionRule: non_blocking + rumSettings: + isEnabled: false + enableProfiling: false + enableSecurityTesting: false + locations: + - aws:us-east-1 + - aws:us-west-2 + +# Terraform schema +my-api-test: + name: "API Test" + message: "API Test Failed" + type: api + subtype: http + tags: + - "managed-by:Terraform" + status: "live" + request_definition: + url: "CHANGEME" + method: GET + request_headers: + Accept-Charset: "utf-8, iso-8859-1;q=0.5" + Accept: "text/json" + options_list: + tick_every: 1800 + no_screenshot: false + follow_redirects: true + retry: + count: 2 + interval: 10 + monitor_options: + renotify_interval: 300 + assertion: + - type: statusCode + operator: is + target: "200" + - type: body + operator: validatesJSONPath + targetjsonpath: + operator: is + targetvalue: true + jsonpath: foo.bar +``` + +These configuration examples are defined in the YAML files in the +[catalog/synthetics/examples](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/datadog-synthetics/catalog/synthetics/examples) +folder. + +You can use different subfolders for your use-case. For example, you can have `dev` and `prod` subfolders to define +different synthetic tests for the `dev` and `prod` environments. + +Then use the `synthetic_paths` variable to point the component to the synthetic test configuration files. + +The configuration files are processed and transformed in the following order: + +- The `datadog-synthetics` component loads the YAML configuration files from the filesystem paths specified by the + `synthetics_paths` variable + +- Then, in the + [synthetics](https://github.com/cloudposse/terraform-datadog-platform/blob/master/modules/synthetics/main.tf) module, + the YAML configuration files are merged and transformed from YAML into the + [Datadog Terraform provider](https://registry.terraform.io/providers/DataDog/datadog/latest/docs/resources/synthetics_test) + schema + +- And finally, the Datadog Terraform provider uses the + [Datadog Synthetics API](https://docs.datadoghq.com/api/latest/synthetics) specifications to call the Datadog API and + provision the synthetic tests + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | +| [datadog](#requirement\_datadog) | >= 3.3.0 | + +## Providers + +No providers. 
+ +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [datadog\_configuration](#module\_datadog\_configuration) | ../datadog-configuration/modules/datadog_keys | n/a | +| [datadog\_synthetics](#module\_datadog\_synthetics) | cloudposse/platform/datadog//modules/synthetics | 1.3.0 | +| [datadog\_synthetics\_merge](#module\_datadog\_synthetics\_merge) | cloudposse/config/yaml//modules/deepmerge | 1.0.2 | +| [datadog\_synthetics\_private\_location](#module\_datadog\_synthetics\_private\_location) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [datadog\_synthetics\_yaml\_config](#module\_datadog\_synthetics\_yaml\_config) | cloudposse/config/yaml | 1.0.2 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [alert\_tags](#input\_alert\_tags) | List of alert tags to add to all alert messages, e.g. `["@opsgenie"]` or `["@devops", "@opsgenie"]` | `list(string)` | `null` | no | +| [alert\_tags\_separator](#input\_alert\_tags\_separator) | Separator for the alert tags. All strings from the `alert_tags` variable will be joined into one string using the separator and then added to the alert message | `string` | `"\n"` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [config\_parameters](#input\_config\_parameters) | Map of parameter values to interpolate into Datadog Synthetic configurations | `map(any)` | `{}` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [context\_tags](#input\_context\_tags) | List of context tags to add to each synthetic check | `set(string)` |
[
"namespace",
"tenant",
"environment",
"stage"
]
| no |
+| [context\_tags\_enabled](#input\_context\_tags\_enabled) | Whether to add context tags to each synthetic check | `bool` | `true` | no |
+| [datadog\_synthetics\_globals](#input\_datadog\_synthetics\_globals) | Map of keys to add to every monitor | `any` | `{}` | no |
+| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [locations](#input\_locations) | Array of locations used to run synthetic tests | `list(string)` | `[]` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [private\_location\_test\_enabled](#input\_private\_location\_test\_enabled) | Use private locations or the public locations provided by datadog | `bool` | `false` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [synthetics\_paths](#input\_synthetics\_paths) | List of paths to Datadog synthetic test configurations | `list(string)` | n/a | yes | +| [synthetics\_private\_location\_component\_name](#input\_synthetics\_private\_location\_component\_name) | The name of the Datadog synthetics private location component | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [datadog\_synthetics\_test\_ids](#output\_datadog\_synthetics\_test\_ids) | IDs of the created Datadog synthetic tests | +| [datadog\_synthetics\_test\_maps](#output\_datadog\_synthetics\_test\_maps) | Map (name: id) of the created Datadog synthetic tests | +| [datadog\_synthetics\_test\_monitor\_ids](#output\_datadog\_synthetics\_test\_monitor\_ids) | IDs of the monitors associated with the Datadog synthetics tests | +| [datadog\_synthetics\_test\_names](#output\_datadog\_synthetics\_test\_names) | Names of the created Datadog synthetic tests | + + + +## References + +- [Datadog Synthetics](https://docs.datadoghq.com/synthetics) +- [Getting Started with Synthetic Monitoring](https://docs.datadoghq.com/getting_started/synthetics) +- [Synthetic Monitoring Guides](https://docs.datadoghq.com/synthetics/guide) +- [Using Synthetic Test Monitors](https://docs.datadoghq.com/synthetics/guide/synthetic-test-monitors) +- [Create An API Test With The API](https://docs.datadoghq.com/synthetics/guide/create-api-test-with-the-api) +- [Manage Your Browser Tests Programmatically](https://docs.datadoghq.com/synthetics/guide/manage-browser-tests-through-the-api) +- [Browser Tests](https://docs.datadoghq.com/synthetics/browser_tests) +- [Synthetics API](https://docs.datadoghq.com/api/latest/synthetics) +- [Terraform resource `datadog_synthetics_test`](https://registry.terraform.io/providers/DataDog/datadog/latest/docs/resources/synthetics_test) + +[](https://cpco.io/component) diff --git a/modules/datadog-synthetics/catalog/synthetics/examples/api-test.yaml b/modules/datadog-synthetics/catalog/synthetics/examples/api-test.yaml new file mode 100644 index 000000000..1a8b8b240 --- /dev/null +++ b/modules/datadog-synthetics/catalog/synthetics/examples/api-test.yaml @@ -0,0 +1,33 @@ +my-api-test: + name: "API Test" + message: "API Test Failed" + type: api + subtype: http + tags: + - "managed-by:Terraform" + status: "live" + request_definition: + url: "CHANGEME" + method: GET + request_headers: + Accept-Charset: "utf-8, iso-8859-1;q=0.5" + Accept: "text/json" + options_list: + tick_every: 1800 + no_screenshot: false + follow_redirects: true + retry: + count: 2 + interval: 10 + monitor_options: + renotify_interval: 300 + assertion: + - type: statusCode + operator: is + target: "200" + - type: body + operator: validatesJSONPath + targetjsonpath: + operator: is + targetvalue: true + jsonpath: foo.bar diff --git a/modules/datadog-synthetics/catalog/synthetics/examples/browser-test.yaml b/modules/datadog-synthetics/catalog/synthetics/examples/browser-test.yaml new file mode 100644 index 000000000..f623d6523 --- /dev/null +++ b/modules/datadog-synthetics/catalog/synthetics/examples/browser-test.yaml @@ -0,0 +1,38 @@ +my-browser-test: + name: My Browser Test + status: live + type: browser + config: + request: + method: GET + headers: {} + url: https://example.com/login + setCookie: |- + DatadogTest=true + message: "My Browser Test Failed" + options: + device_ids: + - chrome.laptop_large + - edge.tablet + - firefox.mobile_small + ignoreServerCertificateError: false + disableCors: false + disableCsp: false + noScreenshot: false + tick_every: 86400 + min_failure_duration: 0 + 
min_location_failed: 1 + retry: + count: 0 + interval: 300 + monitor_options: + renotify_interval: 0 + ci: + executionRule: non_blocking + rumSettings: + isEnabled: false + enableProfiling: false + enableSecurityTesting: false + locations: + - aws:us-east-1 + - aws:us-west-2 diff --git a/modules/datadog-synthetics/context.tf b/modules/datadog-synthetics/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/datadog-synthetics/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? 
true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. 
+ Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/datadog-synthetics/main.tf b/modules/datadog-synthetics/main.tf new file mode 100644 index 000000000..0cf1493e8 --- /dev/null +++ b/modules/datadog-synthetics/main.tf @@ -0,0 +1,68 @@ +locals { + enabled = module.this.enabled + + datadog_synthetics_private_location_id = module.datadog_synthetics_private_location.outputs.synthetics_private_location_id + + # Only return context tags that are specified + context_tags = var.context_tags_enabled ? 
{ + for k, v in module.this.tags : + lower(k) => v + if contains(var.context_tags, lower(k)) + } : {} + + # For deep merge + context_tags_in_tags = var.context_tags_enabled ? { + tags = local.context_tags + } : {} + + synthetics_merged = { + for k, v in module.datadog_synthetics_merge : + k => v.merged + } +} + +# Convert all Datadog synthetics from YAML config to Terraform map +module "datadog_synthetics_yaml_config" { + source = "cloudposse/config/yaml" + version = "1.0.2" + + map_config_local_base_path = path.module + map_config_paths = var.synthetics_paths + + parameters = merge(var.config_parameters, local.context_tags) + + context = module.this.context +} + +module "datadog_synthetics_merge" { + source = "cloudposse/config/yaml//modules/deepmerge" + version = "1.0.2" + + for_each = local.enabled ? module.datadog_synthetics_yaml_config.map_configs : {} + + # Merge in order: 1) datadog synthetics, datadog synthetics globals, context tags + maps = [ + each.value, + var.datadog_synthetics_globals, + local.context_tags_in_tags + ] +} + +module "datadog_synthetics" { + source = "cloudposse/platform/datadog//modules/synthetics" + version = "1.3.0" + + # Disable default tags because we manage them ourselves in this module, because we want to make them lowercase. + default_tags_enabled = false + datadog_synthetics = local.synthetics_merged + + locations = distinct(compact(concat( + var.locations, + [local.datadog_synthetics_private_location_id] + ))) + + alert_tags = var.alert_tags + alert_tags_separator = var.alert_tags_separator + + context = module.this.context +} diff --git a/modules/datadog-synthetics/outputs.tf b/modules/datadog-synthetics/outputs.tf new file mode 100644 index 000000000..6b6c811d6 --- /dev/null +++ b/modules/datadog-synthetics/outputs.tf @@ -0,0 +1,19 @@ +output "datadog_synthetics_test_names" { + value = module.datadog_synthetics.datadog_synthetics_test_names + description = "Names of the created Datadog synthetic tests" +} + +output "datadog_synthetics_test_ids" { + value = module.datadog_synthetics.datadog_synthetics_test_ids + description = "IDs of the created Datadog synthetic tests" +} + +output "datadog_synthetics_test_monitor_ids" { + value = module.datadog_synthetics.datadog_synthetics_test_monitor_ids + description = "IDs of the monitors associated with the Datadog synthetics tests" +} + +output "datadog_synthetics_test_maps" { + value = { for v in module.datadog_synthetics.datadog_synthetic_tests : v.name => v.id } + description = "Map (name: id) of the created Datadog synthetic tests" +} diff --git a/modules/datadog-synthetics/provider-datadog.tf b/modules/datadog-synthetics/provider-datadog.tf new file mode 100644 index 000000000..0b4e862f8 --- /dev/null +++ b/modules/datadog-synthetics/provider-datadog.tf @@ -0,0 +1,12 @@ +module "datadog_configuration" { + source = "../datadog-configuration/modules/datadog_keys" + enabled = true + context = module.this.context +} + +provider "datadog" { + api_key = module.datadog_configuration.datadog_api_key + app_key = module.datadog_configuration.datadog_app_key + api_url = module.datadog_configuration.datadog_api_url + validate = local.enabled +} diff --git a/modules/datadog-synthetics/providers.tf b/modules/datadog-synthetics/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/datadog-synthetics/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. 
When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/datadog-synthetics/remote-state.tf b/modules/datadog-synthetics/remote-state.tf new file mode 100644 index 000000000..2abaaedc8 --- /dev/null +++ b/modules/datadog-synthetics/remote-state.tf @@ -0,0 +1,15 @@ +module "datadog_synthetics_private_location" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.synthetics_private_location_component_name + + bypass = !local.enabled || !var.private_location_test_enabled + ignore_errors = !var.private_location_test_enabled + + defaults = { + synthetics_private_location_id = "" + } + + context = module.this.context +} diff --git a/modules/datadog-synthetics/variables.tf b/modules/datadog-synthetics/variables.tf new file mode 100644 index 000000000..3cda7a56c --- /dev/null +++ b/modules/datadog-synthetics/variables.tf @@ -0,0 +1,63 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "synthetics_paths" { + type = list(string) + description = "List of paths to Datadog synthetic test configurations" +} + +variable "alert_tags" { + type = list(string) + description = "List of alert tags to add to all alert messages, e.g. `[\"@opsgenie\"]` or `[\"@devops\", \"@opsgenie\"]`" + default = null +} + +variable "alert_tags_separator" { + type = string + description = "Separator for the alert tags. 
All strings from the `alert_tags` variable will be joined into one string using the separator and then added to the alert message" + default = "\n" +} + +variable "context_tags_enabled" { + type = bool + description = "Whether to add context tags to add to each synthetic check" + default = true +} + +variable "context_tags" { + type = set(string) + description = "List of context tags to add to each synthetic check" + default = ["namespace", "tenant", "environment", "stage"] +} + +variable "config_parameters" { + type = map(any) + description = "Map of parameter values to interpolate into Datadog Synthetic configurations" + default = {} +} + +variable "datadog_synthetics_globals" { + type = any + description = "Map of keys to add to every monitor" + default = {} +} + +variable "locations" { + type = list(string) + description = "Array of locations used to run synthetic tests" + default = [] +} + +variable "private_location_test_enabled" { + type = bool + description = "Use private locations or the public locations provided by datadog" + default = false +} + +variable "synthetics_private_location_component_name" { + type = string + description = "The name of the Datadog synthetics private location component" + default = null +} diff --git a/modules/datadog-synthetics/versions.tf b/modules/datadog-synthetics/versions.tf new file mode 100644 index 000000000..20f566652 --- /dev/null +++ b/modules/datadog-synthetics/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.9.0" + } + datadog = { + source = "datadog/datadog" + version = ">= 3.3.0" + } + } +} diff --git a/modules/dms/endpoint/README.md b/modules/dms/endpoint/README.md index 2892afc11..a395212ff 100644 --- a/modules/dms/endpoint/README.md +++ b/modules/dms/endpoint/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/dms/endpoint + - layer/unassigned + - provider/aws +--- + # Component: `dms/endpoint` This component provisions DMS endpoints. @@ -69,17 +76,20 @@ components: - target ``` + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.2.0 | +| [terraform](#requirement\_terraform) | >= 1.0 | | [aws](#requirement\_aws) | >= 4.26.0 | ## Providers -No providers. +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.26.0 | ## Modules @@ -91,7 +101,10 @@ No providers. ## Resources -No resources. +| Name | Type | +|------|------| +| [aws_ssm_parameter.password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.username](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | ## Inputs @@ -111,8 +124,6 @@ No resources. | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [extra\_connection\_attributes](#input\_extra\_connection\_attributes) | Additional attributes associated with the connection to the source database | `string` | `""` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kafka\_settings](#input\_kafka\_settings) | Configuration block for Kafka settings | `map(any)` | `null` | no | | [kinesis\_settings](#input\_kinesis\_settings) | Configuration block for Kinesis settings | `map(any)` | `null` | no | | [kms\_key\_arn](#input\_kms\_key\_arn) | (Required when engine\_name is `mongodb`, optional otherwise). ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kms_key_arn`, then AWS DMS will use your default encryption key | `string` | `null` | no | @@ -123,7 +134,8 @@ No resources. | [mongodb\_settings](#input\_mongodb\_settings) | Configuration block for MongoDB settings | `map(any)` | `null` | no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | -| [password](#input\_password) | Password to be used to login to the endpoint database | `string` | `null` | no | +| [password](#input\_password) | Password to be used to login to the endpoint database | `string` | `""` | no | +| [password\_path](#input\_password\_path) | If set, the path in AWS SSM Parameter Store to fetch the password for the DMS admin user | `string` | `""` | no | | [port](#input\_port) | Port used by the endpoint database | `number` | `null` | no | | [redshift\_settings](#input\_redshift\_settings) | Configuration block for Redshift settings | `map(any)` | `null` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | @@ -137,7 +149,8 @@ No resources. | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | -| [username](#input\_username) | User name to be used to login to the endpoint database | `string` | `null` | no | +| [username](#input\_username) | User name to be used to login to the endpoint database | `string` | `""` | no | +| [username\_path](#input\_username\_path) | If set, the path in AWS SSM Parameter Store to fetch the username for the DMS admin user | `string` | `""` | no | ## Outputs @@ -146,10 +159,11 @@ No resources. | [dms\_endpoint\_arn](#output\_dms\_endpoint\_arn) | DMS endpoint ARN | | [dms\_endpoint\_id](#output\_dms\_endpoint\_id) | DMS endpoint ID | - + ## References - * [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/dms/modules/dms-endpoint) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/dms/modules/dms-endpoint) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/dms/endpoint/main.tf b/modules/dms/endpoint/main.tf index cdf92bf4b..0e2b52f52 100644 --- a/modules/dms/endpoint/main.tf +++ b/modules/dms/endpoint/main.tf @@ -1,3 +1,18 @@ +locals { + fetch_username = !(length(var.username) > 0) && (length(var.username_path) > 0) ? true : false + fetch_password = !(length(var.password) > 0) && (length(var.password_path) > 0) ? true : false +} + +data "aws_ssm_parameter" "username" { + count = local.fetch_username ? 1 : 0 + name = var.username_path +} + +data "aws_ssm_parameter" "password" { + count = local.fetch_password ? 1 : 0 + name = var.password_path +} + module "dms_endpoint" { source = "cloudposse/dms/aws//modules/dms-endpoint" version = "0.1.1" @@ -7,7 +22,6 @@ module "dms_endpoint" { kms_key_arn = var.kms_key_arn certificate_arn = var.certificate_arn database_name = var.database_name - password = var.password port = var.port extra_connection_attributes = var.extra_connection_attributes secrets_manager_access_role_arn = var.secrets_manager_access_role_arn @@ -15,7 +29,8 @@ module "dms_endpoint" { server_name = var.server_name service_access_role = var.service_access_role ssl_mode = var.ssl_mode - username = var.username + username = local.fetch_username ? data.aws_ssm_parameter.username[0].value : var.username + password = local.fetch_password ? data.aws_ssm_parameter.password[0].value : var.password elasticsearch_settings = var.elasticsearch_settings kafka_settings = var.kafka_settings kinesis_settings = var.kinesis_settings diff --git a/modules/dms/endpoint/providers.tf b/modules/dms/endpoint/providers.tf index c2419aabb..89ed50a98 100644 --- a/modules/dms/endpoint/providers.tf +++ b/modules/dms/endpoint/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/dms/endpoint/variables.tf b/modules/dms/endpoint/variables.tf index 2bc19a1e7..e02be501d 100644 --- a/modules/dms/endpoint/variables.tf +++ b/modules/dms/endpoint/variables.tf @@ -34,7 +34,7 @@ variable "database_name" { variable "password" { type = string description = "Password to be used to login to the endpoint database" - default = null + default = "" } variable "port" { @@ -82,7 +82,7 @@ variable "ssl_mode" { variable "username" { type = string description = "User name to be used to login to the endpoint database" - default = null + default = "" } variable "elasticsearch_settings" { @@ -120,3 +120,15 @@ variable "s3_settings" { description = "Configuration block for S3 settings" default = null } + +variable "username_path" { + type = string + description = "If set, the path in AWS SSM Parameter Store to fetch the username for the DMS admin user" + default = "" +} + +variable "password_path" { + type = string + description = "If set, the path in AWS SSM Parameter Store to fetch the password for the DMS admin user" + default = "" +} diff --git a/modules/dms/endpoint/versions.tf b/modules/dms/endpoint/versions.tf index 463b50e1d..d8daf2ae0 100644 --- a/modules/dms/endpoint/versions.tf +++ b/modules/dms/endpoint/versions.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.2.0" + required_version = ">= 1.0" required_providers { aws = { diff --git a/modules/dms/iam/README.md b/modules/dms/iam/README.md index fe9d4f9ab..b1d5ec321 100644 --- a/modules/dms/iam/README.md +++ b/modules/dms/iam/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/dms/iam + - layer/unassigned + - provider/aws +--- + # Component: `dms/iam` This component provisions IAM roles required for DMS. @@ -23,14 +30,14 @@ components: name: dms ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [source](#requirement\_source) | hashicorp/aws | -| [version](#requirement\_version) | >= 4.26.0 | +| [aws](#requirement\_aws) | >= 4.26.0 | ## Providers @@ -60,8 +67,6 @@ No resources. | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -82,10 +87,11 @@ No resources. | [dms\_redshift\_s3\_role\_arn](#output\_dms\_redshift\_s3\_role\_arn) | DMS Redshift S3 role ARN | | [dms\_vpc\_management\_role\_arn](#output\_dms\_vpc\_management\_role\_arn) | DMS VPC management role ARN | - + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/dms/modules/dms-iam) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/dms/modules/dms-iam) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/dms/iam/providers.tf b/modules/dms/iam/providers.tf index c2419aabb..89ed50a98 100644 --- a/modules/dms/iam/providers.tf +++ b/modules/dms/iam/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/dms/iam/versions.tf b/modules/dms/iam/versions.tf index 1150e407f..d8daf2ae0 100644 --- a/modules/dms/iam/versions.tf +++ b/modules/dms/iam/versions.tf @@ -2,12 +2,14 @@ terraform { required_version = ">= 1.0" required_providers { - source = "hashicorp/aws" - # Using the latest version of the provider since the earlier versions had many issues with DMS replication tasks. - # In particular: - # https://github.com/hashicorp/terraform-provider-aws/pull/24047 - # https://github.com/hashicorp/terraform-provider-aws/pull/23692 - # https://github.com/hashicorp/terraform-provider-aws/pull/13476 - version = ">= 4.26.0" + aws = { + source = "hashicorp/aws" + # Using the latest version of the provider since the earlier versions had many issues with DMS replication tasks. + # In particular: + # https://github.com/hashicorp/terraform-provider-aws/pull/24047 + # https://github.com/hashicorp/terraform-provider-aws/pull/23692 + # https://github.com/hashicorp/terraform-provider-aws/pull/13476 + version = ">= 4.26.0" + } } } diff --git a/modules/dms/replication-instance/README.md b/modules/dms/replication-instance/README.md index 6f206b6a5..b48146975 100644 --- a/modules/dms/replication-instance/README.md +++ b/modules/dms/replication-instance/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/dms/replication-instance + - layer/unassigned + - provider/aws +--- + # Component: `dms/replication-instance` This component provisions DMS replication instances. 
@@ -42,6 +49,7 @@ components: allocated_storage: 50 ``` + ## Requirements @@ -62,7 +70,7 @@ No providers. | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | | [security\_group](#module\_security\_group) | cloudposse/security-group/aws | 1.0.1 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -86,8 +94,6 @@ No resources. | [engine\_version](#input\_engine\_version) | The engine version number of the replication instance | `string` | `"3.4"` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -116,10 +122,11 @@ No resources. | [dms\_replication\_instance\_arn](#output\_dms\_replication\_instance\_arn) | DMS replication instance ARN | | [dms\_replication\_instance\_id](#output\_dms\_replication\_instance\_id) | DMS replication instance ID | - + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/dms/modules/dms-replication-instance) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/dms/modules/dms-replication-instance) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/dms/replication-instance/providers.tf b/modules/dms/replication-instance/providers.tf index c2419aabb..89ed50a98 100644 --- a/modules/dms/replication-instance/providers.tf +++ b/modules/dms/replication-instance/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/dms/replication-instance/remote-state.tf b/modules/dms/replication-instance/remote-state.tf index 1b1079219..757ef9067 100644 --- a/modules/dms/replication-instance/remote-state.tf +++ b/modules/dms/replication-instance/remote-state.tf @@ -1,6 +1,6 @@ module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = "vpc" diff --git a/modules/dms/replication-task/README.md b/modules/dms/replication-task/README.md index 681ec311c..294345780 100644 --- a/modules/dms/replication-task/README.md +++ b/modules/dms/replication-task/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/dms/replication-task + - layer/unassigned + - provider/aws +--- + # Component: `dms/replication-task` This component provisions DMS replication tasks. @@ -37,6 +44,7 @@ components: table_mappings_file: "config/replication-task-table-mappings-example.json" ``` + ## Requirements @@ -53,9 +61,9 @@ No providers. 
| Name | Source | Version | |------|--------|---------| -| [dms\_endpoint\_source](#module\_dms\_endpoint\_source) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | -| [dms\_endpoint\_target](#module\_dms\_endpoint\_target) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | -| [dms\_replication\_instance](#module\_dms\_replication\_instance) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [dms\_endpoint\_source](#module\_dms\_endpoint\_source) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [dms\_endpoint\_target](#module\_dms\_endpoint\_target) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [dms\_replication\_instance](#module\_dms\_replication\_instance) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [dms\_replication\_task](#module\_dms\_replication\_task) | cloudposse/dms/aws//modules/dms-replication-task | 0.1.1 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -78,8 +86,6 @@ No resources. | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -106,10 +112,11 @@ No resources. | [dms\_replication\_task\_arn](#output\_dms\_replication\_task\_arn) | DMS replication task ARN | | [dms\_replication\_task\_id](#output\_dms\_replication\_task\_id) | DMS replication task ID | - + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/dms/modules/dms-replication-task) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/dms/modules/dms-replication-task) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/dms/replication-task/main.tf b/modules/dms/replication-task/main.tf index 239e8167f..cb6801c1e 100644 --- a/modules/dms/replication-task/main.tf +++ b/modules/dms/replication-task/main.tf @@ -2,9 +2,9 @@ module "dms_replication_task" { source = "cloudposse/dms/aws//modules/dms-replication-task" version = "0.1.1" - replication_instance_arn = module.dms_replication_instance.outputs.replication_instance_arn - source_endpoint_arn = module.dms_endpoint_source.outputs.endpoint_arn - target_endpoint_arn = module.dms_endpoint_target.outputs.endpoint_arn + replication_instance_arn = module.dms_replication_instance.outputs.dms_replication_instance_arn + source_endpoint_arn = module.dms_endpoint_source.outputs.dms_endpoint_arn + target_endpoint_arn = module.dms_endpoint_target.outputs.dms_endpoint_arn start_replication_task = var.start_replication_task migration_type = var.migration_type diff --git a/modules/dms/replication-task/providers.tf b/modules/dms/replication-task/providers.tf index c2419aabb..89ed50a98 100644 --- a/modules/dms/replication-task/providers.tf +++ b/modules/dms/replication-task/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/dms/replication-task/remote-state.tf b/modules/dms/replication-task/remote-state.tf index 2710ac5eb..fdbd10b07 100644 --- a/modules/dms/replication-task/remote-state.tf +++ b/modules/dms/replication-task/remote-state.tf @@ -1,6 +1,6 @@ module "dms_replication_instance" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = var.replication_instance_component_name @@ -9,7 +9,7 @@ module "dms_replication_instance" { module "dms_endpoint_source" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = var.source_endpoint_component_name @@ -18,7 +18,7 @@ module "dms_endpoint_source" { module "dms_endpoint_target" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = var.target_endpoint_component_name diff --git a/modules/dns-delegated/README.md b/modules/dns-delegated/README.md index 057179941..c39b1710d 100644 --- a/modules/dns-delegated/README.md +++ b/modules/dns-delegated/README.md @@ -1,6 +1,15 @@ +--- +tags: + - component/dns-delegated + - layer/network + - provider/aws +--- + # Component: `dns-delegated` -This component is responsible for provisioning a DNS zone which delegates nameservers to the DNS zone in the primary DNS account. The primary DNS zone is expected to already be provisioned via [the `dns-primary` component](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/dns-primary). +This component is responsible for provisioning a DNS zone which delegates nameservers to the DNS zone in the primary DNS +account. The primary DNS zone is expected to already be provisioned via +[the `dns-primary` component](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/dns-primary). This component also provisions a wildcard ACM certificate for the given subdomain. @@ -8,9 +17,12 @@ This component also provisions a wildcard ACM certificate for the given subdomai **Stack Level**: Global or Regional -Here's an example snippet for how to use this component. Use this component in global or regional stacks for any accounts where you host services that need DNS records on a given subdomain (e.g. delegated zone) of the root domain (e.g. primary zone). +Here's an example snippet for how to use this component. Use this component in global or regional stacks for any +accounts where you host services that need DNS records on a given subdomain (e.g. delegated zone) of the root domain +(e.g. primary zone). 
-Public Hosted Zone `devplatform.example.net` will be created and `example.net` HZ in the dns primary account will contain a record delegating DNS to the new HZ +Public Hosted Zone `devplatform.example.net` will be created and `example.net` HZ in the dns primary account will +contain a record delegating DNS to the new HZ This will create an ACM record @@ -20,23 +32,23 @@ components: dns-delegated: vars: zone_config: - - subdomain: devplatform - zone_name: example.net + - subdomain: devplatform + zone_name: example.net request_acm_certificate: true dns_private_zone_enabled: false # dns_soa_config configures the SOA record for the zone:: # - awsdns-hostmaster.amazon.com. ; AWS default value for administrator email address # - 1 ; serial number, not used by AWS - # - 7200 ; refresh time in seconds for secondary DNS servers to refreh SOA record + # - 7200 ; refresh time in seconds for secondary DNS servers to refresh SOA record # - 900 ; retry time in seconds for secondary DNS servers to retry failed SOA record update # - 1209600 ; expire time in seconds (1209600 is 2 weeks) for secondary DNS servers to remove SOA record if they cannot refresh it # - 60 ; nxdomain TTL, or time in seconds for secondary DNS servers to cache negative responses # See [SOA Record Documentation](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html) for more information. dns_soa_config: "awsdns-hostmaster.amazon.com. 1 7200 900 1209600 60" - ``` -Private Hosted Zone `devplatform.example.net` will be created and `example.net` HZ in the dns primary account will contain a record delegating DNS to the new HZ +Private Hosted Zone `devplatform.example.net` will be created and `example.net` HZ in the dns primary account will +contain a record delegating DNS to the new HZ This will create an ACM record using a Private CA @@ -46,8 +58,8 @@ components: dns-delegated: vars: zone_config: - - subdomain: devplatform - zone_name: example.net + - subdomain: devplatform + zone_name: example.net request_acm_certificate: true dns_private_zone_enabled: true vpc_region_abbreviation_type: short @@ -60,13 +72,19 @@ components: ### Limitations -Switching a hosted zone from public to private can cause issues because the provider will try to do an update instead of a ForceNew. +Switching a hosted zone from public to private can cause issues because the provider will try to do an update instead of +a ForceNew. See: https://github.com/hashicorp/terraform-provider-aws/issues/7614 -It's not possible to toggle between public and private so if switching from public to private and downtime is acceptable, delete the records of the hosted zone, delete the hosted zone, destroy the terraform component, and deploy with the new settings. +It's not possible to toggle between public and private so if switching from public to private and downtime is +acceptable, delete the records of the hosted zone, delete the hosted zone, destroy the terraform component, and deploy +with the new settings. -NOTE: With each of these workarounds, you may have an issue connecting to the service specific provider e.g. for `auroro-postgres` you may get an error of the host set to `localhost` on the `postgresql` provider resulting in an error. To get around this, dump the endpoint using `atmos terraform show`, hardcode the `host` input on the provider, and re-run the apply. +NOTE: With each of these workarounds, you may have an issue connecting to the service specific provider e.g. 
for +
`aurora-postgres` you may get an error of the host set to `localhost` on the `postgresql` provider resulting in an
+error. To get around this, dump the endpoint using `atmos terraform show`, hardcode the `host` input on the provider,
+and re-run the apply.
 
 #### Workaround if downtime is fine
 
@@ -84,6 +102,53 @@ NOTE: With each of these workarounds, you may have an issue connecting to the se
 1. Deploy the new dns-delegated-private component
 1. Move aurora-postgres, msk, external-dns, echo-server, etc to the new hosted zone by re-deploying
 
+## Caveats
+
+- Do not create a delegation for a subdomain of a domain in a zone that is not authoritative for that subdomain
+  (usually because you have already delegated a parent subdomain). Though Amazon Route 53 will allow you to, you
+  should not do it. For historic reasons, Route 53 Public DNS allows customers to create two NS delegations within a
+  hosted zone which creates a conflict (and can return either set to resolvers depending on the query).
+
+For example, in a single hosted zone with the domain name `example.com`, it is possible to create two NS delegations
+which are parent and child of each other as follows:
+
+```
+a.example.com. 172800 IN NS ns-1084.awsdns-07.org.
+a.example.com. 172800 IN NS ns-634.awsdns-15.net.
+a.example.com. 172800 IN NS ns-1831.awsdns-36.co.uk.
+a.example.com. 172800 IN NS ns-190.awsdns-23.com.
+
+b.a.example.com. 172800 IN NS ns-1178.awsdns-19.org.
+b.a.example.com. 172800 IN NS ns-614.awsdns-12.net.
+b.a.example.com. 172800 IN NS ns-1575.awsdns-04.co.uk.
+b.a.example.com. 172800 IN NS ns-338.awsdns-42.com.
+```
+
+This configuration creates two discrete possible resolution paths.
+
+1. If a resolver directly queries the `example.com` nameservers for `c.b.a.example.com`, it will receive the second set
+   of nameservers.
+
+2. If a resolver queries `example.com` for `a.example.com`, it will receive the first set of nameservers.
+
+If the resolver then proceeds to query the `a.example.com` nameservers for `c.b.a.example.com`, the response is driven
+by the contents of the `a.example.com` zone, which may be different than the results returned by the `b.a.example.com`
+nameservers. `c.b.a.example.com` may not have an entry in the `a.example.com` nameservers, resulting in an error
+(`NXDOMAIN`) being returned.
+
+Since 15th May 2020, Route 53 Resolver has been enabling a modern DNS resolver standard called "QName Minimization"
+([RFC 7816](https://tools.ietf.org/html/rfc7816)). This change causes the resolver to more strictly use recursion
+path [2] described above, whereas path [1] was common before.
+
+As of January 2022, you can observe the different query strategies in use by Google DNS at `8.8.8.8` (strategy 1) and
+Cloudflare DNS at `1.1.1.1` (strategy 2). You should verify that both DNS servers resolve your host records properly.
+
+Takeaway
+
+1. In order to ensure DNS resolution is consistent no matter the resolver, it is important to always create NS
+   delegations only in authoritative zones. 
+ + ## Requirements @@ -105,10 +170,10 @@ NOTE: With each of these workarounds, you may have an issue connecting to the se |------|--------|---------| | [acm](#module\_acm) | cloudposse/acm-request-certificate/aws | 0.17.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [private\_ca](#module\_private\_ca) | cloudposse/stack-config/yaml//modules/remote-state | 1.1.0 | +| [private\_ca](#module\_private\_ca) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [utils](#module\_utils) | cloudposse/utils/aws | 1.1.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.1.0 | +| [utils](#module\_utils) | cloudposse/utils/aws | 1.3.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -140,12 +205,10 @@ NOTE: With each of these workarounds, you may have an issue connecting to the se | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [dns\_private\_zone\_enabled](#input\_dns\_private\_zone\_enabled) | Whether to set the zone to public or private | `bool` | `false` | no | -| [dns\_soa\_config](#input\_dns\_soa\_config) | Root domain name DNS SOA record:
- awsdns-hostmaster.amazon.com. ; AWS default value for administrator email address
- 1 ; serial number, not used by AWS
- 7200 ; refresh time in seconds for secondary DNS servers to refreh SOA record
- 900 ; retry time in seconds for secondary DNS servers to retry failed SOA record update
- 1209600 ; expire time in seconds (1209600 is 2 weeks) for secondary DNS servers to remove SOA record if they cannot refresh it
- 60 ; nxdomain TTL, or time in seconds for secondary DNS servers to cache negative responses
See [SOA Record Documentation](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html) for more information. | `string` | `"awsdns-hostmaster.amazon.com. 1 7200 900 1209600 60"` | no | +| [dns\_soa\_config](#input\_dns\_soa\_config) | Root domain name DNS SOA record:
- awsdns-hostmaster.amazon.com. ; AWS default value for administrator email address
- 1 ; serial number, not used by AWS
- 7200 ; refresh time in seconds for secondary DNS servers to refresh SOA record
- 900 ; retry time in seconds for secondary DNS servers to retry failed SOA record update
- 1209600 ; expire time in seconds (1209600 is 2 weeks) for secondary DNS servers to remove SOA record if they cannot refresh it
- 60 ; nxdomain TTL, or time in seconds for secondary DNS servers to cache negative responses
See [SOA Record Documentation](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html) for more information. | `string` | `"awsdns-hostmaster.amazon.com. 1 7200 900 1209600 60"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -174,10 +237,11 @@ NOTE: With each of these workarounds, you may have an issue connecting to the se | [route53\_hosted\_zone\_protections](#output\_route53\_hosted\_zone\_protections) | List of AWS Shield Advanced Protections for Route53 Hosted Zones. | | [zones](#output\_zones) | Subdomain and zone config | - + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/dns-delegated) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/dns-delegated) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/dns-delegated/main.tf b/modules/dns-delegated/main.tf index 35390bf03..57df4037f 100644 --- a/modules/dns-delegated/main.tf +++ b/modules/dns-delegated/main.tf @@ -55,7 +55,7 @@ resource "aws_route53_zone" "private" { module "utils" { source = "cloudposse/utils/aws" - version = "1.1.0" + version = "1.3.0" } resource "aws_route53_zone_association" "secondary" { @@ -76,7 +76,7 @@ resource "aws_shield_protection" "shield_protection" { name = local.aws_route53_zone[each.key].name resource_arn = format("arn:%s:route53:::hostedzone/%s", local.aws_partition, local.aws_route53_zone[each.key].id) - tags = module.this.context + tags = module.this.tags } resource "aws_route53_record" "soa" { diff --git a/modules/dns-delegated/providers-dns-primary.tf b/modules/dns-delegated/providers-dns-primary.tf new file mode 100644 index 000000000..76fee705b --- /dev/null +++ b/modules/dns-delegated/providers-dns-primary.tf @@ -0,0 +1,16 @@ +provider "aws" { + # The AWS provider to use to make changes in the DNS primary account + alias = "primary" + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.dns_terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.dns_terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.dns_terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} diff --git a/modules/dns-delegated/providers.tf b/modules/dns-delegated/providers.tf index dd2b4a1fb..ef923e10a 100644 --- a/modules/dns-delegated/providers.tf +++ b/modules/dns-delegated/providers.tf @@ -1,28 +1,14 @@ provider "aws" { - # The AWS provider to use to make changes in the DNS primary account - alias = "primary" region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.dns_terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = var.import_role_arn == null ? (module.iam_roles.dns_terraform_role_arn != null ? [true] : []) : ["import"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.dns_terraform_role_arn) - } - } -} - -provider "aws" { - # The AWS provider to use to make changes in the target (delegated) account - region = var.region - - profile = module.iam_roles.profiles_enabled ? 
coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null - - dynamic "assume_role" { - for_each = var.import_role_arn == null ? (module.iam_roles.terraform_role_arn != null ? [true] : []) : ["import"] - content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -31,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/dns-delegated/remote-state.tf b/modules/dns-delegated/remote-state.tf index bf10b0363..e920505e8 100644 --- a/modules/dns-delegated/remote-state.tf +++ b/modules/dns-delegated/remote-state.tf @@ -1,6 +1,6 @@ module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.1.0" + version = "1.5.0" for_each = local.private_enabled ? local.vpc_environment_names : toset([]) @@ -12,7 +12,7 @@ module "vpc" { module "private_ca" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.1.0" + version = "1.5.0" count = local.private_ca_enabled && local.certificate_enabled ? 1 : 0 diff --git a/modules/dns-delegated/variables.tf b/modules/dns-delegated/variables.tf index 7fa904eb0..209ececa7 100644 --- a/modules/dns-delegated/variables.tf +++ b/modules/dns-delegated/variables.tf @@ -81,7 +81,7 @@ variable "dns_soa_config" { Root domain name DNS SOA record: - awsdns-hostmaster.amazon.com. ; AWS default value for administrator email address - 1 ; serial number, not used by AWS - - 7200 ; refresh time in seconds for secondary DNS servers to refreh SOA record + - 7200 ; refresh time in seconds for secondary DNS servers to refresh SOA record - 900 ; retry time in seconds for secondary DNS servers to retry failed SOA record update - 1209600 ; expire time in seconds (1209600 is 2 weeks) for secondary DNS servers to remove SOA record if they cannot refresh it - 60 ; nxdomain TTL, or time in seconds for secondary DNS servers to cache negative responses @@ -89,4 +89,3 @@ variable "dns_soa_config" { EOT default = "awsdns-hostmaster.amazon.com. 1 7200 900 1209600 60" } - diff --git a/modules/dns-primary/README.md b/modules/dns-primary/README.md index 63cbe34eb..fe5e876cb 100644 --- a/modules/dns-primary/README.md +++ b/modules/dns-primary/README.md @@ -1,15 +1,66 @@ +--- +tags: + - component/dns-primary + - layer/network + - provider/aws +--- + # Component: `dns-primary` -This component is responsible for provisioning the primary DNS zones into an AWS account. By convention, we typically provision the primary DNS zones in the `dns` account. The primary account for branded zones (e.g. `example.com`), however, would be in the `prod` account, while staging zone (e.g. `example.qa`) might be in the `staging` account. +This component is responsible for provisioning the primary DNS zones into an AWS account. By convention, we typically +provision the primary DNS zones in the `dns` account. The primary account for branded zones (e.g. `example.com`), +however, would be in the `prod` account, while staging zone (e.g. `example.qa`) might be in the `staging` account. 
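+
+A minimal sketch of that convention, using the documented `domain_names` input (stack file and domain names are
+illustrative; adjust them to your own layout). A branded domain such as `example.com` would get the same component
+configured in the `prod` account's global stack rather than in the `dns` account's stack:
+
+```yaml
+# gbl-dns.yaml -- root domains shared across accounts live in the `dns` account
+components:
+  terraform:
+    dns-primary:
+      vars:
+        domain_names:
+          - example.net
+```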
+ +The zones from the primary DNS zone are then expected to be delegated to other accounts via +[the `dns-delegated` component](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/dns-delegated). +Additionally, external records can be created on the primary DNS zones via the `record_config` variable. + +## Architecture + +### Summary + +The `dns` account gets a single `dns-primary` component deployed. Every other account that needs DNS entries gets a +single `dns-delegated` component, chaining off the domains in the `dns` account. Optionally, accounts can have a single +`dns-primary` component of their own, to have apex domains (which Cloud Posse calls "vanity domains"). Typically, these +domains are configured with CNAME (or apex alias) records to point to service domain entries. + +### Details + +The purpose of the `dns` account is to host root domains shared by several accounts (with each account being delegated +its own subdomain) and to be the owner of domain registrations purchased from Amazon. + +The purpose of the `dns-primary` component is to provision AWS Route53 zones for the root domains. These zones, once +provisioned, must be manually configured into the Domain Name Registrar's records as name servers. A single component +can provision multiple domains and, optionally, associated ACM (SSL) certificates in a single account. + +Cloud Posse's architecture expects root domains shared by several accounts to be provisioned in the `dns` account with +`dns-primary` and delegated to other accounts using the `dns-delegated` component, with each account getting its own +subdomain corresponding to a Route 53 zone in the delegated account. Cloud Posse's architecture requires at least one +such domain, called "the service domain", be provisioned. The service domain is not customer facing, and is provisioned +to allow fully automated construction of host names without any concerns about how they look. Although they are not +secret, the public will never see them. + +Root domains used by a single account are provisioned with the `dns-primary` component directly in that account. Cloud +Posse calls these "vanity domains". These can be whatever the marketing or PR or other stakeholders want to be. + +After a domain is provisioned in the `dns` account, the `dns-delegated` component can provision one or more subdomains +for each account, and, optionally, associated ACM certificates. For the service domain, Cloud Posse recommends using the +account name as the delegated subdomain (either directly, e.g. "plat-dev", or as multiple subdomains, e.g. "dev.plat") +because that allows `dns-delegated` to automatically provision any required host name in that zone. -The zones from the primary DNS zone are then expected to be delegated to other accounts via [the `dns-delegated` component](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/dns-delegated). Additionally, external records can be created on the primary DNS zones via the `record_config` variable. +There is no automated support for `dns-primary` to provision root domains outside of the `dns` account that are to be +shared by multiple accounts, and such usage is not recommended. If you must, `dns-primary` can provision a subdomain of +a root domain that is provisioned in another account (not `dns`). In this case, the delegation of the subdomain must be +done manually by entering the name servers into the parent domain's records (instead of in the Registrar's records). 
+The architecture does not support other configurations, or non-standard component names. ## Usage **Stack Level**: Global -Here's an example snippet for how to use this component. This component should only be applied once as the DNS zones it creates are global. This is typically done via the DNS stack (e.g. `gbl-dns.yaml`). +Here's an example snippet for how to use this component. This component should only be applied once as the DNS zones it +creates are global. This is typically done via the DNS stack (e.g. `gbl-dns.yaml`). ```yaml components: @@ -25,14 +76,36 @@ components: ttl: 60 records: - 53.229.170.215 + # using a period at the end of a name - root_zone: example.net - name: www + name: www. type: CNAME ttl: 60 records: - example.net + # using numbers as name requires quotes + - root_zone: example.net + name: "123456." + type: CNAME + ttl: 60 + records: + - example.net + # strings that are very long, this could be a DKIM key + - root_zone: example.net + name: service._domainkey. + type: CNAME + ttl: 60 + records: + - !!str |- + YourVeryLongStringGoesHere ``` +> [!TIP] +> +> Use the [acm](https://docs.cloudposse.com/components/library/aws/acm) component for more advanced certificate +> requirements. + + ## Requirements @@ -51,7 +124,7 @@ components: | Name | Source | Version | |------|--------|---------| -| [acm](#module\_acm) | cloudposse/acm-request-certificate/aws | 0.17.0 | +| [acm](#module\_acm) | cloudposse/acm-request-certificate/aws | 0.16.3 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -74,13 +147,11 @@ components: | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | -| [dns\_soa\_config](#input\_dns\_soa\_config) | Root domain name DNS SOA record:
- awsdns-hostmaster.amazon.com. ; AWS default value for administrator email address
- 1 ; serial number, not used by AWS
- 7200 ; refresh time in seconds for secondary DNS servers to refreh SOA record
- 900 ; retry time in seconds for secondary DNS servers to retry failed SOA record update
- 1209600 ; expire time in seconds (1209600 is 2 weeks) for secondary DNS servers to remove SOA record if they cannot refresh it
- 60 ; nxdomain TTL, or time in seconds for secondary DNS servers to cache negative responses
See [SOA Record Documentation](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html) for more information. | `string` | `"awsdns-hostmaster.amazon.com. 1 7200 900 1209600 60"` | no | +| [dns\_soa\_config](#input\_dns\_soa\_config) | Root domain name DNS SOA record:
- awsdns-hostmaster.amazon.com. ; AWS default value for administrator email address
- 1 ; serial number, not used by AWS
- 7200 ; refresh time in seconds for secondary DNS servers to refresh SOA record
- 900 ; retry time in seconds for secondary DNS servers to retry failed SOA record update
- 1209600 ; expire time in seconds (1209600 is 2 weeks) for secondary DNS servers to remove SOA record if they cannot refresh it
- 60 ; nxdomain TTL, or time in seconds for secondary DNS servers to cache negative responses
See [SOA Record Documentation](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html) for more information. | `string` | `"awsdns-hostmaster.amazon.com. 1 7200 900 1209600 60"` | no | | [domain\_names](#input\_domain\_names) | Root domain name list, e.g. `["example.net"]` | `list(string)` | `null` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -102,8 +173,11 @@ components: | [acms](#output\_acms) | ACM certificates for domains | | [zones](#output\_zones) | DNS zones | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/dns-primary) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/dns-primary) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/dns-primary/acm.tf b/modules/dns-primary/acm.tf index 013c319de..88fd79a1e 100644 --- a/modules/dns-primary/acm.tf +++ b/modules/dns-primary/acm.tf @@ -11,8 +11,9 @@ locals { module "acm" { for_each = local.domains_set - source = "cloudposse/acm-request-certificate/aws" - version = "0.17.0" + source = "cloudposse/acm-request-certificate/aws" + // Note: 0.17.0 is a 'preview' release, so we're using 0.16.2 + version = "0.16.3" enabled = local.certificate_enabled diff --git a/modules/dns-primary/providers.tf b/modules/dns-primary/providers.tf index 447c00328..ef923e10a 100644 --- a/modules/dns-primary/providers.tf +++ b/modules/dns-primary/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = var.import_role_arn == null ? (module.iam_roles.terraform_role_arn != null ? [true] : []) : ["import"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/dns-primary/variables.tf b/modules/dns-primary/variables.tf index 162dc0cf2..6aa801287 100644 --- a/modules/dns-primary/variables.tf +++ b/modules/dns-primary/variables.tf @@ -42,7 +42,7 @@ variable "dns_soa_config" { Root domain name DNS SOA record: - awsdns-hostmaster.amazon.com. 
; AWS default value for administrator email address - 1 ; serial number, not used by AWS - - 7200 ; refresh time in seconds for secondary DNS servers to refreh SOA record + - 7200 ; refresh time in seconds for secondary DNS servers to refresh SOA record - 900 ; retry time in seconds for secondary DNS servers to retry failed SOA record update - 1209600 ; expire time in seconds (1209600 is 2 weeks) for secondary DNS servers to remove SOA record if they cannot refresh it - 60 ; nxdomain TTL, or time in seconds for secondary DNS servers to cache negative responses diff --git a/modules/documentdb/README.md b/modules/documentdb/README.md index d1bd87639..ebc5ee1ed 100644 --- a/modules/documentdb/README.md +++ b/modules/documentdb/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/documentdb + - layer/data + - provider/aws +--- + # Component: `documentdb` This component is responsible for provisioning DocumentDB clusters. @@ -24,6 +31,7 @@ components: retention_period: 35 ``` + ## Requirements @@ -44,13 +52,13 @@ components: | Name | Source | Version | |------|--------|---------| -| [dns\_delegated](#module\_dns\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.0 | -| [dns\_gbl\_delegated](#module\_dns\_gbl\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.0 | +| [dns\_delegated](#module\_dns\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [dns\_gbl\_delegated](#module\_dns\_gbl\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [documentdb\_cluster](#module\_documentdb\_cluster) | cloudposse/documentdb-cluster/aws | 0.14.0 | -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -84,8 +92,6 @@ components: | [engine\_version](#input\_engine\_version) | The version number of the database engine to use | `string` | `"3.6.0"` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [instance\_class](#input\_instance\_class) | The instance class to use. For more details, see https://docs.aws.amazon.com/documentdb/latest/developerguide/db-instance-classes.html#db-instance-class-specs | `string` | `"db.r4.large"` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | @@ -120,10 +126,11 @@ components: | [security\_group\_id](#output\_security\_group\_id) | ID of the DocumentDB cluster Security Group | | [security\_group\_name](#output\_security\_group\_name) | Name of the DocumentDB cluster Security Group | - + ## References - * [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/documentdb) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/documentdb) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/documentdb/default.auto.tfvars b/modules/documentdb/default.auto.tfvars deleted file mode 100644 index b6c99eb87..000000000 --- a/modules/documentdb/default.auto.tfvars +++ /dev/null @@ -1,5 +0,0 @@ -# This file is included by default in terraform plans - -enabled = true - -name = "documentdb" diff --git a/modules/documentdb/main.tf b/modules/documentdb/main.tf index c4971fdbc..378325fea 100644 --- a/modules/documentdb/main.tf +++ b/modules/documentdb/main.tf @@ -35,4 +35,4 @@ module "documentdb_cluster" { zone_id = try(module.dns_delegated.outputs.default_dns_zone_id, module.dns_gbl_delegated.outputs.default_dns_zone_id) context = module.this.context -} \ No newline at end of file +} diff --git a/modules/documentdb/providers.tf b/modules/documentdb/providers.tf index efa9ede5d..ef923e10a 100644 --- a/modules/documentdb/providers.tf +++ b/modules/documentdb/providers.tf @@ -1,11 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -14,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/documentdb/remote-state.tf b/modules/documentdb/remote-state.tf index 838f7bd7d..68a715244 100644 --- a/modules/documentdb/remote-state.tf +++ b/modules/documentdb/remote-state.tf @@ -1,6 +1,6 @@ module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.0" + version = "1.5.0" component = "vpc" @@ -9,7 +9,7 @@ module "vpc" { module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.0" + version = "1.5.0" component = "eks" @@ -24,7 +24,7 @@ module "eks" { module "dns_delegated" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.0" + version = "1.5.0" component = "dns-delegated" @@ -33,7 +33,7 @@ module "dns_delegated" { module "dns_gbl_delegated" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.0" + version = "1.5.0" stack_config_local_path = "../../../stacks" component = "dns-delegated" diff --git a/modules/documentdb/ssm.tf b/modules/documentdb/ssm.tf index dd8837d8e..44e22b4c7 100644 --- a/modules/documentdb/ssm.tf +++ b/modules/documentdb/ssm.tf @@ -29,4 +29,4 @@ resource "aws_ssm_parameter" "master_password" { name = "/${module.this.name}/master_password" type = "SecureString" value = join("", random_password.master_password.*.result) -} \ No newline at end of file +} diff --git a/modules/documentdb/variables.tf b/modules/documentdb/variables.tf index f846ce9d9..d7e25d04c 100644 --- a/modules/documentdb/variables.tf +++ b/modules/documentdb/variables.tf @@ -120,4 +120,4 @@ variable "eks_security_group_ingress_enabled" { type = bool description = "Whether to add the Security Group managed by the EKS cluster in the same regional stack to the ingress allowlist of the DocumentDB cluster." default = true -} \ No newline at end of file +} diff --git a/modules/dynamodb/README.md b/modules/dynamodb/README.md index 4da60de42..df1d3ca2e 100644 --- a/modules/dynamodb/README.md +++ b/modules/dynamodb/README.md @@ -1,3 +1,11 @@ +--- +tags: + - component/dynamodb + - layer/data + - layer/gitops + - provider/aws +--- + # Component: `dynamodb` This component is responsible for provisioning a DynamoDB table. @@ -25,16 +33,16 @@ components: point_in_time_recovery_enabled: true streams_enabled: false ttl_enabled: false - ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | >= 3.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | ## Providers @@ -44,7 +52,7 @@ No providers. | Name | Source | Version | |------|--------|---------| -| [dynamodb\_table](#module\_dynamodb\_table) | cloudposse/dynamodb/aws | 0.29.2 | +| [dynamodb\_table](#module\_dynamodb\_table) | cloudposse/dynamodb/aws | 0.36.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -69,6 +77,7 @@ No resources. 
| [autoscaler\_tags](#input\_autoscaler\_tags) | Additional resource tags for the autoscaler module | `map(string)` | `{}` | no | | [billing\_mode](#input\_billing\_mode) | DynamoDB Billing mode. Can be PROVISIONED or PAY\_PER\_REQUEST | `string` | `"PROVISIONED"` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [deletion\_protection\_enabled](#input\_deletion\_protection\_enabled) | Enable/disable DynamoDB table deletion protection | `bool` | `false` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [dynamodb\_attributes](#input\_dynamodb\_attributes) | Additional DynamoDB attributes in the form of a list of mapped values |
list(object({
name = string
type = string
}))
| `[]` | no | @@ -79,7 +88,7 @@ No resources. | [hash\_key](#input\_hash\_key) | DynamoDB table Hash Key | `string` | n/a | yes | | [hash\_key\_type](#input\_hash\_key\_type) | Hash Key type, which must be a scalar type: `S`, `N`, or `B` for String, Number or Binary data, respectively. | `string` | `"S"` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | +| [import\_table](#input\_import\_table) | Import Amazon S3 data into a new table. |
object({
# Valid values are GZIP, ZSTD and NONE
input_compression_type = optional(string, null)
# Valid values are CSV, DYNAMODB_JSON, and ION.
input_format = string
input_format_options = optional(object({
csv = object({
delimiter = string
header_list = list(string)
})
}), null)
s3_bucket_source = object({
bucket = string
bucket_owner = optional(string)
key_prefix = optional(string)
})
})
| `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -97,6 +106,7 @@ No resources. | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [stream\_view\_type](#input\_stream\_view\_type) | When an item in the table is modified, what information is written to the stream | `string` | `""` | no | | [streams\_enabled](#input\_streams\_enabled) | Enable DynamoDB streams | `bool` | `false` | no | +| [table\_name](#input\_table\_name) | Table name. If provided, the bucket will be created with this name instead of generating the name from the context | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | | [ttl\_attribute](#input\_ttl\_attribute) | DynamoDB table TTL attribute | `string` | `""` | no | @@ -107,17 +117,20 @@ No resources. | Name | Description | |------|-------------| | [global\_secondary\_index\_names](#output\_global\_secondary\_index\_names) | DynamoDB global secondary index names | +| [hash\_key](#output\_hash\_key) | DynamoDB table hash key | | [local\_secondary\_index\_names](#output\_local\_secondary\_index\_names) | DynamoDB local secondary index names | +| [range\_key](#output\_range\_key) | DynamoDB table range key | | [table\_arn](#output\_table\_arn) | DynamoDB table ARN | | [table\_id](#output\_table\_id) | DynamoDB table ID | | [table\_name](#output\_table\_name) | DynamoDB table name | | [table\_stream\_arn](#output\_table\_stream\_arn) | DynamoDB table stream ARN | | [table\_stream\_label](#output\_table\_stream\_label) | DynamoDB table stream label | - + ## References - * [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/dynamodb) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/dynamodb) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/dynamodb/default.auto.tfvars b/modules/dynamodb/default.auto.tfvars deleted file mode 100644 index 99105f049..000000000 --- a/modules/dynamodb/default.auto.tfvars +++ /dev/null @@ -1,5 +0,0 @@ -# This file is included by default in terraform plans - -enabled = true - -name = "dynamodb" diff --git a/modules/dynamodb/main.tf b/modules/dynamodb/main.tf index 93e4b376e..03982fb79 100644 --- a/modules/dynamodb/main.tf +++ b/modules/dynamodb/main.tf @@ -6,11 +6,13 @@ locals { module "dynamodb_table" { source = "cloudposse/dynamodb/aws" - version = "0.29.2" + version = "0.36.0" + table_name = var.table_name billing_mode = var.billing_mode replicas = var.replicas dynamodb_attributes = var.dynamodb_attributes + import_table = var.import_table global_secondary_index_map = var.global_secondary_index_map local_secondary_index_map = var.local_secondary_index_map @@ -41,5 +43,7 @@ module "dynamodb_table" { enable_point_in_time_recovery = var.point_in_time_recovery_enabled + deletion_protection_enabled = var.deletion_protection_enabled + context = module.this.context -} \ No newline at end of file +} diff --git a/modules/dynamodb/outputs.tf b/modules/dynamodb/outputs.tf index 9a51d6747..1126615db 100644 --- a/modules/dynamodb/outputs.tf +++ b/modules/dynamodb/outputs.tf @@ -31,4 +31,14 @@ output "table_stream_arn" { output "table_stream_label" { value = module.dynamodb_table.table_stream_label description = "DynamoDB table stream label" -} \ No newline at end of file +} + +output "hash_key" { + value = var.hash_key + description = "DynamoDB table hash key" +} + +output "range_key" { + value = var.range_key + description = "DynamoDB table range key" +} diff --git a/modules/dynamodb/providers.tf b/modules/dynamodb/providers.tf index c6e854450..ef923e10a 100644 --- a/modules/dynamodb/providers.tf +++ b/modules/dynamodb/providers.tf @@ -1,15 +1,19 @@ provider "aws" { - region = var.region - profile = coalesce(var.import_profile_name, 
module.iam_roles.terraform_profile_name) + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } } module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} diff --git a/modules/dynamodb/variables.tf b/modules/dynamodb/variables.tf index 43bfb0bcf..a3ea0d22b 100644 --- a/modules/dynamodb/variables.tf +++ b/modules/dynamodb/variables.tf @@ -128,6 +128,12 @@ variable "autoscaler_tags" { description = "Additional resource tags for the autoscaler module" } +variable "table_name" { + type = string + default = null + description = "Table name. If provided, the bucket will be created with this name instead of generating the name from the context" +} + variable "dynamodb_attributes" { type = list(object({ name = string @@ -166,4 +172,32 @@ variable "replicas" { type = list(string) default = [] description = "List of regions to create a replica table in" -} \ No newline at end of file +} + +variable "deletion_protection_enabled" { + type = bool + default = false + description = "Enable/disable DynamoDB table deletion protection" +} + +variable "import_table" { + type = object({ + # Valid values are GZIP, ZSTD and NONE + input_compression_type = optional(string, null) + # Valid values are CSV, DYNAMODB_JSON, and ION. + input_format = string + input_format_options = optional(object({ + csv = object({ + delimiter = string + header_list = list(string) + }) + }), null) + s3_bucket_source = object({ + bucket = string + bucket_owner = optional(string) + key_prefix = optional(string) + }) + }) + default = null + description = "Import Amazon S3 data into a new table." +} diff --git a/modules/dynamodb/versions.tf b/modules/dynamodb/versions.tf index d5cde7755..cc73ffd35 100644 --- a/modules/dynamodb/versions.tf +++ b/modules/dynamodb/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.0" + version = ">= 4.9.0" } } } diff --git a/modules/ec2-client-vpn/README.md b/modules/ec2-client-vpn/README.md index 8c3f291a1..c4ac715b9 100644 --- a/modules/ec2-client-vpn/README.md +++ b/modules/ec2-client-vpn/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/ec2-client-vpn + - layer/network + - provider/aws +--- + # Component: `ec2-client-vpn` This component is responsible for provisioning VPN Client Endpoints. @@ -6,7 +13,9 @@ This component is responsible for provisioning VPN Client Endpoints. **Stack Level**: Regional -Here's an example snippet for how to use this component. This component should only be applied once as the resources it creates are regional. This is typically done via the corp stack (e.g. `uw2-corp.yaml`). This is because a vpc endpoint requires a vpc and the network stack does not have a vpc. +Here's an example snippet for how to use this component. This component should only be applied once as the resources it +creates are regional. This is typically done via the corp stack (e.g. `uw2-corp.yaml`). This is because a vpc endpoint +requires a vpc and the network stack does not have a vpc. 
```yaml components: @@ -23,24 +32,25 @@ components: retention_in_days: 7 organization_name: acme split_tunnel: true - availability_zones: - - us-west-2a - - us-west-2b - - us-west-2c + availability_zones: + - us-west-2a + - us-west-2b + - us-west-2c associated_security_group_ids: [] additional_routes: - - destination_cidr_block: 0.0.0.0/0 - description: Internet Route + - destination_cidr_block: 0.0.0.0/0 + description: Internet Route authorization_rules: - - name: Internet Rule - authorize_all_groups: true - description: Allows routing to the internet" - target_network_cidr: 0.0.0.0/0 + - name: Internet Rule + authorize_all_groups: true + description: Allows routing to the internet" + target_network_cidr: 0.0.0.0/0 ``` ## Deploying -NOTE: This module uses the `aws_ec2_client_vpn_route` resource which throws an error if too many API calls come from a single host. Ignore this error and repeat the terraform command. It usually takes 3 deploys (or destroys) to complete. +NOTE: This module uses the `aws_ec2_client_vpn_route` resource which throws an error if too many API calls come from a +single host. Ignore this error and repeat the terraform command. It usually takes 3 deploys (or destroys) to complete. Error on create (See issue https://github.com/hashicorp/terraform-provider-aws/issues/19750) @@ -56,9 +66,12 @@ timeout while waiting for resource to be gone (last state: 'deleting', timeout: ## Testing -NOTE: The `GoogleIDPMetadata-cloudposse.com.xml` in this repo is equivalent to the one in the `sso` component and is used for testing. This component can only specify a single SAML document. The customer SAML xml should be placed in this directory side-by-side the CloudPosse SAML xml. +NOTE: The `GoogleIDPMetadata-cloudposse.com.xml` in this repo is equivalent to the one in the `sso` component and is +used for testing. This component can only specify a single SAML document. The customer SAML xml should be placed in this +directory side-by-side the CloudPosse SAML xml. -Prior to testing, the component needs to be deployed and the AWS client app needs to be setup by the IdP admin otherwise the following steps will result in an error similar to `app_not_configured_for_user`. +Prior to testing, the component needs to be deployed and the AWS client app needs to be setup by the IdP admin otherwise +the following steps will result in an error similar to `app_not_configured_for_user`. 1. Deploy the component in a regional account with a VPC like `ue2-corp`. 1. Copy the contents of `client_configuration` into a file called `client_configuration.ovpn` @@ -74,22 +87,26 @@ Prior to testing, the component needs to be deployed and the AWS client app need A browser will launch and allow you to connect to the VPN. -1. Make a note of where this component is deployed -1. Ensure that the resource to connect to is in a VPC that is connected by the transit gateway -1. Ensure that the resource to connect to contains a security group with a rule that allows ingress from where the client vpn is deployed (e.g. `ue2-corp`) -1. Use `nmap` to test if the port is `open`. If the port is `filtered` then it's not open. +1. Make a note of where this component is deployed +1. Ensure that the resource to connect to is in a VPC that is connected by the transit gateway +1. Ensure that the resource to connect to contains a security group with a rule that allows ingress from where the + client vpn is deployed (e.g. `ue2-corp`) +1. Use `nmap` to test if the port is `open`. If the port is `filtered` then it's not open. 
- nmap -p +```console +nmap -p +``` Successful tests have been seen with MSK and RDS. + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | | [awsutils](#requirement\_awsutils) | >= 0.11.0 | ## Providers @@ -100,10 +117,10 @@ No providers. | Name | Source | Version | |------|--------|---------| -| [ec2\_client\_vpn](#module\_ec2\_client\_vpn) | cloudposse/ec2-client-vpn/aws | 0.11.0 | +| [ec2\_client\_vpn](#module\_ec2\_client\_vpn) | cloudposse/ec2-client-vpn/aws | 0.14.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.3 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -128,8 +145,6 @@ No resources. | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [export\_client\_certificate](#input\_export\_client\_certificate) | Flag to determine whether to export the client certificate with the VPN configuration | `bool` | `true` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -161,10 +176,12 @@ No resources. | [vpn\_endpoint\_dns\_name](#output\_vpn\_endpoint\_dns\_name) | The DNS Name of the Client VPN Endpoint Connection. | | [vpn\_endpoint\_id](#output\_vpn\_endpoint\_id) | The ID of the Client VPN Endpoint Connection. | + ## References -* [cloudposse/terraform-aws-ec2-client-vpn](https://github.com/cloudposse/terraform-aws-ec2-client-vpn) - Cloud Posse's upstream component -* [cloudposse/awsutils](https://github.com/cloudposse/terraform-provider-awsutils) - Cloud Posse's awsutils provider +- [cloudposse/terraform-aws-ec2-client-vpn](https://github.com/cloudposse/terraform-aws-ec2-client-vpn) - Cloud Posse's + upstream component +- [cloudposse/awsutils](https://github.com/cloudposse/terraform-provider-awsutils) - Cloud Posse's awsutils provider [](https://cpco.io/component) diff --git a/modules/ec2-client-vpn/default.auto.tfvars b/modules/ec2-client-vpn/default.auto.tfvars deleted file mode 100644 index 86813cc2f..000000000 --- a/modules/ec2-client-vpn/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -enabled = false - -name = "ec2-client-vpn" diff --git a/modules/ec2-client-vpn/main.tf b/modules/ec2-client-vpn/main.tf index a488ace32..8e79a95f6 100644 --- a/modules/ec2-client-vpn/main.tf +++ b/modules/ec2-client-vpn/main.tf @@ -31,7 +31,7 @@ locals { module "ec2_client_vpn" { source = "cloudposse/ec2-client-vpn/aws" - version = "0.11.0" + version = "0.14.0" ca_common_name = var.ca_common_name root_common_name = var.root_common_name diff --git a/modules/ec2-client-vpn/outputs.tf b/modules/ec2-client-vpn/outputs.tf index 6c3bd2883..2154c3c2d 100644 --- a/modules/ec2-client-vpn/outputs.tf +++ b/modules/ec2-client-vpn/outputs.tf @@ -21,4 +21,5 @@ output "client_configuration" { output "full_client_configuration" { value = module.ec2_client_vpn.full_client_configuration description = "Client configuration including client certificate and private key for mutual authentication" + sensitive = true } diff --git a/modules/ec2-client-vpn/provider-awsutils.mixin.tf b/modules/ec2-client-vpn/provider-awsutils.mixin.tf new file mode 100644 index 000000000..70fa8d095 --- /dev/null +++ b/modules/ec2-client-vpn/provider-awsutils.mixin.tf @@ -0,0 +1,14 @@ +provider "awsutils" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} diff --git a/modules/ec2-client-vpn/providers.tf b/modules/ec2-client-vpn/providers.tf index e27b8de2f..ef923e10a 100644 --- a/modules/ec2-client-vpn/providers.tf +++ b/modules/ec2-client-vpn/providers.tf @@ -1,25 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
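+    # As noted above, the iam-roles module returns either a profile name or a role ARN, so in practice the provider either uses the named profile or assumes the role.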
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) - } - } -} - -provider "awsutils" { - region = var.region - - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null - - dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] - content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -28,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/ec2-client-vpn/remote-state.tf b/modules/ec2-client-vpn/remote-state.tf index 437923524..757ef9067 100644 --- a/modules/ec2-client-vpn/remote-state.tf +++ b/modules/ec2-client-vpn/remote-state.tf @@ -1,6 +1,6 @@ module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.3" + version = "1.5.0" component = "vpc" diff --git a/modules/ec2-client-vpn/versions.tf b/modules/ec2-client-vpn/versions.tf index b6344fa87..4e65d1ce3 100644 --- a/modules/ec2-client-vpn/versions.tf +++ b/modules/ec2-client-vpn/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } awsutils = { source = "cloudposse/awsutils" diff --git a/modules/ec2-instance/README.md b/modules/ec2-instance/README.md new file mode 100644 index 000000000..958618d7f --- /dev/null +++ b/modules/ec2-instance/README.md @@ -0,0 +1,104 @@ +--- +tags: + - component/ec2-instance + - layer/addons + - provider/aws +--- + +# Component: `ec2-instance` + +This component is responsible for provisioning a single EC2 instance. + +## Usage + +**Stack Level**: Regional + +The typical stack configuration for this component is as follows: + +```yaml +components: + terraform: + ec2-instance: + vars: + enabled: true + name: ec2 +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [template](#requirement\_template) | >= 2.2 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | +| [template](#provider\_template) | >= 2.2 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [ec2\_instance](#module\_ec2\_instance) | cloudposse/ec2-instance/aws | 1.4.0 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_ami.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | +| [template_file.userdata](https://registry.terraform.io/providers/cloudposse/template/latest/docs/data-sources/file) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. 
Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [ami\_filters](#input\_ami\_filters) | A list of AMI filters for finding the latest AMI |
list(object({
name = string
values = list(string)
}))
|
[
{
"name": "architecture",
"values": [
"x86_64"
]
},
{
"name": "virtualization-type",
"values": [
"hvm"
]
}
]
| no | +| [ami\_name\_regex](#input\_ami\_name\_regex) | The regex used to match the latest AMI to be used for the EC2 instance. | `string` | `"^amzn2-ami-hvm.*"` | no | +| [ami\_owner](#input\_ami\_owner) | The owner of the AMI used for the ZScaler EC2 instances. | `string` | `"amazon"` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [instance\_type](#input\_instance\_type) | The instance family to use for the EC2 instance | `string` | `"t3a.micro"` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS region | `string` | n/a | yes | +| [security\_group\_rules](#input\_security\_group\_rules) | A list of maps of Security Group rules.
Each map's keys and values correspond to the arguments of the `aws_security_group_rule` resource.
To get more info see [security\_group\_rule](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule). | `list(any)` |
[
{
"cidr_blocks": [
"0.0.0.0/0"
],
"from_port": 0,
"protocol": "-1",
"to_port": 65535,
"type": "egress"
}
]
| no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [user\_data](#input\_user\_data) | User data to be included with this EC2 instance | `string` | `"echo \"hello user data\""` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [instance\_id](#output\_instance\_id) | Instance ID | +| [private\_ip](#output\_private\_ip) | Private IP of the instance | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/ec2-instance) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/ec2-instance/context.tf b/modules/ec2-instance/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/ec2-instance/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/ec2-instance/main.tf b/modules/ec2-instance/main.tf new file mode 100644 index 000000000..8ac773d12 --- /dev/null +++ b/modules/ec2-instance/main.tf @@ -0,0 +1,51 @@ +locals { + enabled = module.this.enabled + + vpc_id = module.vpc.outputs.vpc_id + # basic usage picks the first private subnet from the vpc component + vpc_private_subnet_ids = sort(module.vpc.outputs.private_subnet_ids) + subnet_id = local.vpc_private_subnet_ids[0] +} + +data "aws_ami" "this" { + count = local.enabled ? 1 : 0 + + most_recent = true + owners = [var.ami_owner] + name_regex = var.ami_name_regex + + dynamic "filter" { + for_each = toset(var.ami_filters) + content { + name = filter.value.name + values = filter.value.values + } + } +} + +data "template_file" "userdata" { + count = local.enabled ? 1 : 0 + template = file("${path.module}/templates/userdata.sh.tmpl") + + vars = { + user_data = var.user_data + } +} + +module "ec2_instance" { + source = "cloudposse/ec2-instance/aws" + version = "1.4.0" + + enabled = local.enabled + + ami = local.enabled ? data.aws_ami.this[0].id : "" + ami_owner = var.ami_owner + instance_type = var.instance_type + user_data_base64 = local.enabled ? 
base64encode(data.template_file.userdata[0].rendered) : "" + + subnet = local.subnet_id + vpc_id = local.vpc_id + security_group_rules = var.security_group_rules + + context = module.this.context +} diff --git a/modules/ec2-instance/outputs.tf b/modules/ec2-instance/outputs.tf new file mode 100644 index 000000000..bdee37c9b --- /dev/null +++ b/modules/ec2-instance/outputs.tf @@ -0,0 +1,9 @@ +output "instance_id" { + value = module.ec2_instance[*].id + description = "Instance ID" +} + +output "private_ip" { + value = module.ec2_instance[*].private_ip + description = "Private IP of the instance" +} diff --git a/modules/ec2-instance/providers.tf b/modules/ec2-instance/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/ec2-instance/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/ec2-instance/remote-state.tf b/modules/ec2-instance/remote-state.tf new file mode 100644 index 000000000..757ef9067 --- /dev/null +++ b/modules/ec2-instance/remote-state.tf @@ -0,0 +1,8 @@ +module "vpc" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "vpc" + + context = module.this.context +} diff --git a/modules/ec2-instance/templates/userdata.sh.tmpl b/modules/ec2-instance/templates/userdata.sh.tmpl new file mode 100644 index 000000000..bcf19e6d5 --- /dev/null +++ b/modules/ec2-instance/templates/userdata.sh.tmpl @@ -0,0 +1,3 @@ +#!/bin/bash + +${user_data} diff --git a/modules/ec2-instance/variables.tf b/modules/ec2-instance/variables.tf new file mode 100644 index 000000000..cd87f269c --- /dev/null +++ b/modules/ec2-instance/variables.tf @@ -0,0 +1,64 @@ +variable "region" { + type = string + description = "AWS region" +} + +variable "ami_owner" { + type = string + description = "The owner of the AMI used for the ZScaler EC2 instances." + default = "amazon" +} + +variable "ami_name_regex" { + type = string + description = "The regex used to match the latest AMI to be used for the EC2 instance." + default = "^amzn2-ami-hvm.*" +} + +variable "ami_filters" { + type = list(object({ + name = string + values = list(string) + })) + default = [ + { + name = "architecture" + values = ["x86_64"] + }, + { + name = "virtualization-type" + values = ["hvm"] + } + ] + description = "A list of AMI filters for finding the latest AMI" +} + +variable "instance_type" { + type = string + default = "t3a.micro" + description = "The instance family to use for the EC2 instance" +} + +variable "security_group_rules" { + type = list(any) + default = [ + { + type = "egress" + from_port = 0 + to_port = 65535 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + ] + description = <<-EOT + A list of maps of Security Group rules. + The values of map is fully completed with `aws_security_group_rule` resource. + To get more info see [security_group_rule](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule). 
+ EOT +} + +variable "user_data" { + type = string + default = "echo \"hello user data\"" + description = "User data to be included with this EC2 instance" +} diff --git a/modules/ec2-instance/versions.tf b/modules/ec2-instance/versions.tf new file mode 100644 index 000000000..7a90cef78 --- /dev/null +++ b/modules/ec2-instance/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + template = { + source = "cloudposse/template" + version = ">= 2.2" + } + } +} diff --git a/modules/ecr/README.md b/modules/ecr/README.md index a5b6ae6ce..78d9ae4e4 100644 --- a/modules/ecr/README.md +++ b/modules/ecr/README.md @@ -1,18 +1,31 @@ +--- +tags: + - component/ecr + - layer/baseline + - provider/aws +--- + # Component: `ecr` This component is responsible for provisioning repositories, lifecycle rules, and permissions for streamlined ECR usage. -This utilizes [the roles-to-principals submodule](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/account-map/modules/roles-to-principals) +This utilizes +[the roles-to-principals submodule](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/account-map/modules/roles-to-principals) to assign accounts to various roles. It is also compatible with the [GitHub Actions IAM Role mixin](https://github.com/cloudposse/terraform-aws-components/blob/master/mixins/github-actions-iam-role/README-github-action-iam-role.md). +> [!WARNING] +> +> Older versions of our reference architecture have an`eks-iam` component that needs to be updated to provide sufficient +> IAM roles to allow pods to pull from ECR repos + ## Usage **Stack Level**: Regional Here's an example snippet for how to use this component. This component is normally only applied once as the resources -it creates are globally accessible, but you may want to create ECRs in multiple regions for redundancy. -This is typically provisioned via the stack for the "artifact" account (typically `auto`, `artifact`, or `corp`) -in the primary region. +it creates are globally accessible, but you may want to create ECRs in multiple regions for redundancy. This is +typically provisioned via the stack for the "artifact" account (typically `auto`, `artifact`, or `corp`) in the primary +region. ```yaml components: @@ -34,10 +47,10 @@ components: - microservice-c read_write_account_role_map: identity: - - admin - - cicd + - admin + - cicd automation: - - admin + - admin read_only_account_role_map: corp: ["*"] dev: ["*"] @@ -45,6 +58,7 @@ components: stage: ["*"] ``` + ## Requirements @@ -63,7 +77,7 @@ components: | Name | Source | Version | |------|--------|---------| -| [ecr](#module\_ecr) | cloudposse/ecr/aws | 0.34.0 | +| [ecr](#module\_ecr) | cloudposse/ecr/aws | 0.36.0 | | [full\_access](#module\_full\_access) | ../account-map/modules/roles-to-principals | n/a | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [readonly\_access](#module\_readonly\_access) | ../account-map/modules/roles-to-principals | n/a | @@ -94,8 +108,6 @@ components: | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | | [image\_tag\_mutability](#input\_image\_tag\_mutability) | The tag mutability setting for the repository. Must be one of: `MUTABLE` or `IMMUTABLE` | `string` | `"MUTABLE"` | no | | [images](#input\_images) | List of image names (ECR repo names) to create repos for | `list(string)` | n/a | yes | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -103,6 +115,7 @@ components: | [max\_image\_count](#input\_max\_image\_count) | Max number of images to store. Old ones will be deleted to make room for new ones. | `number` | n/a | yes | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [principals\_lambda](#input\_principals\_lambda) | Principal account IDs of Lambdas allowed to consume ECR | `list(string)` | `[]` | no | | [protected\_tags](#input\_protected\_tags) | Tags to refrain from deleting | `list(string)` | `[]` | no | | [read\_only\_account\_role\_map](#input\_read\_only\_account\_role\_map) | Map of `account:[role, role...]` for read-only access. Use `*` for role to grant access to entire account | `map(list(string))` | `{}` | no | | [read\_write\_account\_role\_map](#input\_read\_write\_account\_role\_map) | Map of `account:[role, role...]` for write access. Use `*` for role to grant access to entire account | `map(list(string))` | n/a | yes | @@ -122,11 +135,19 @@ components: | [ecr\_user\_arn](#output\_ecr\_user\_arn) | ECR user ARN | | [ecr\_user\_name](#output\_ecr\_user\_name) | ECR user name | | [ecr\_user\_unique\_id](#output\_ecr\_user\_unique\_id) | ECR user unique ID assigned by AWS | +| [repository\_host](#output\_repository\_host) | ECR repository name | + + +## Related + +- [Decide How to distribute Docker Images](https://docs.cloudposse.com/layers/software-delivery/design-decisions/decide-how-to-distribute-docker-images/) +- [Decide on ECR Strategy](https://docs.cloudposse.com/layers/project/design-decisions/decide-on-ecr-strategy/) ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/ecr) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/ecr) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/ecr/default.auto.tfvars b/modules/ecr/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/ecr/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/ecr/main.tf b/modules/ecr/main.tf index 038f98184..e6b5621d1 100644 --- a/modules/ecr/main.tf +++ b/modules/ecr/main.tf @@ -20,7 +20,7 @@ locals { module "ecr" { source = "cloudposse/ecr/aws" - version = "0.34.0" + version = "0.36.0" protected_tags = var.protected_tags enable_lifecycle_policy = var.enable_lifecycle_policy @@ -29,6 +29,7 @@ module "ecr" { max_image_count = var.max_image_count principals_full_access = compact(concat(module.full_access.principals, [local.ecr_user_arn])) principals_readonly_access = module.readonly_access.principals + principals_lambda = var.principals_lambda scan_images_on_push = var.scan_images_on_push use_fullname = false diff --git a/modules/ecr/outputs.tf b/modules/ecr/outputs.tf index bb574f900..790d8a423 100644 --- a/modules/ecr/outputs.tf +++ b/modules/ecr/outputs.tf @@ -3,6 +3,11 @@ output "ecr_repo_arn_map" { description = "Map of image names to ARNs" } +output "repository_host" { + value = try(split("/", module.ecr.repository_url)[0], null) + description = "ECR repository name" +} + output "ecr_repo_url_map" { value = module.ecr.repository_url_map description = "Map of image names to URLs" diff --git a/modules/ecr/providers.tf b/modules/ecr/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/ecr/providers.tf +++ 
b/modules/ecr/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/ecr/variables.tf b/modules/ecr/variables.tf index 293d2b51d..45dce3abe 100644 --- a/modules/ecr/variables.tf +++ b/modules/ecr/variables.tf @@ -52,3 +52,9 @@ variable "enable_lifecycle_policy" { type = bool description = "Enable/disable image lifecycle policy" } + +variable "principals_lambda" { + type = list(string) + description = "Principal account IDs of Lambdas allowed to consume ECR" + default = [] +} diff --git a/modules/ecs-service/CHANGELOG.md b/modules/ecs-service/CHANGELOG.md new file mode 100644 index 000000000..df4f4de64 --- /dev/null +++ b/modules/ecs-service/CHANGELOG.md @@ -0,0 +1,11 @@ +## PR [#1008](https://github.com/cloudposse/terraform-aws-components/pull/1008) + +### Possible Breaking Change + +- Refactored how S3 Task Definitions and the Terraform Task definition are merged. + - Introduced local `local.containers_priority_terraform` to be referenced whenever terraform Should take priority + - Introduced local `local.containers_priority_s3` to be referenced whenever S3 Should take priority +- `map_secrets` pulled out from container definition to local where it can be better maintained. Used Terraform as + priority as it is a calculated as a map of arns. +- `s3_mirror_name` now automatically uploads a task-template.json to s3 mirror where it can be pulled from GitHub + Actions. diff --git a/modules/ecs-service/README.md b/modules/ecs-service/README.md index 544c9b41d..eb65e229e 100644 --- a/modules/ecs-service/README.md +++ b/modules/ecs-service/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/ecs-service + - layer/ecs + - provider/aws +--- + # Component: `ecs-service` This component is responsible for creating an ECS service. @@ -101,7 +108,10 @@ components: hostPort: 80 protocol: tcp command: - - '/bin/sh -c "echo '' Amazon ECS Sample App

Congratulations! Your application is now running on a container in Amazon ECS. '' > /usr/local/apache2/htdocs/index.html && httpd-foreground"'
+ - '/bin/sh -c "echo '' Amazon ECS Sample App Congratulations! Your application is now running on a container in Amazon ECS.
'' > /usr/local/apache2/htdocs/index.html && httpd-foreground"' entrypoint: ["sh", "-c"] task: desired_count: 1 @@ -144,128 +154,328 @@ components: task_cpu: 256 ``` +#### Other Domains + +This component supports alternate service names for your ECS Service through a couple of variables: + +- `vanity_domain` & `vanity_alias` - This will create a route to the service in the listener rules of the ALB. This will + also create a Route 53 alias record in the hosted zone in this account. The hosted zone is looked up by the + `vanity_domain` input. +- `additional_targets` - This will create a route to the service in the listener rules of the ALB. This will not create + a Route 53 alias record. + +Examples: + +```yaml +ecs/platform/service/echo-server: + vars: + vanity_domain: "dev-acme.com" + vanity_alias: + - "echo-server.dev-acme.com" + additional_targets: + - "echo.acme.com" +``` + +This then creates the following listener rules: + +```text +HTTP Host Header is +echo-server.public-platform.use2.dev.plat.service-discovery.com + OR echo-server.dev-acme.com + OR echo.acme.com +``` + +It will also create the record in Route53 to point `"echo-server.dev-acme.com"` to the ALB. Thus +`"echo-server.dev-acme.com"` should resolve. + +We can then create a pointer to this service in the `acme.come` hosted zone. + +```yaml +dns-primary: + vars: + domain_names: + - acme.com + record_config: + - root_zone: acme.com + name: echo. + type: CNAME + ttl: 60 + records: + - echo-server.dev-acme.com +``` + +This will create a CNAME record in the `acme.com` hosted zone that points `echo.acme.com` to `echo-server.dev-acme.com`. + +### EFS + +EFS is supported by this ecs service, you can use either `efs_volumes` or `efs_component_volumes` in your task +definition. + +This example shows how to use `efs_component_volumes` which remote looks up efs component and uses the `efs_id` to mount +the volume. 
And how to use `efs_volumes` + +```yaml +components: + terraform: + ecs-services/my-service: + metadata: + component: ecs-service + inherits: + - ecs-services/defaults + vars: + containers: + service: + name: app + image: my-image:latest + log_configuration: + logDriver: awslogs + options: {} + port_mappings: + - containerPort: 8080 + hostPort: 8080 + protocol: tcp + mount_points: + - containerPath: "/var/lib/" + sourceVolume: "my-volume-mount" + + task: + efs_component_volumes: + - name: "my-volume-mount" + host_path: null + efs_volume_configuration: + - component: efs/my-volume-mount + root_directory: "/var/lib/" + transit_encryption: "ENABLED" + transit_encryption_port: 2999 + authorization_config: [] + efs_volumes: + - name: "my-volume-mount-2" + host_path: null + efs_volume_ configuration: + - file_system_id: "fs-1234" + root_directory: "/var/lib/" + transit_encryption: "ENABLED" + transit_encryption_port: 2998 + authorization_config: [] +``` + + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.66.1 | +| [jq](#requirement\_jq) | >=0.2.0 | +| [template](#requirement\_template) | >= 2.2 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | +| [aws](#provider\_aws) | >= 4.66.1 | +| [jq](#provider\_jq) | >=0.2.0 | +| [template](#provider\_template) | >= 2.2 | ## Modules | Name | Source | Version | |------|--------|---------| -| [alb\_ecs\_label](#module\_alb\_ecs\_label) | cloudposse/label/null | 0.25.0 | -| [alb\_ingress](#module\_alb\_ingress) | cloudposse/alb-ingress/aws | 0.24.3 | -| [container\_definition](#module\_container\_definition) | cloudposse/ecs-container-definition/aws | 0.58.1 | -| [ecs\_alb\_service\_task](#module\_ecs\_alb\_service\_task) | cloudposse/ecs-alb-service-task/aws | 0.66.0 | +| [alb](#module\_alb) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [alb\_ingress](#module\_alb\_ingress) | cloudposse/alb-ingress/aws | 0.28.0 | +| [cloudmap\_namespace](#module\_cloudmap\_namespace) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [cloudmap\_namespace\_service\_discovery](#module\_cloudmap\_namespace\_service\_discovery) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [container\_definition](#module\_container\_definition) | cloudposse/ecs-container-definition/aws | 0.61.1 | +| [datadog\_configuration](#module\_datadog\_configuration) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [datadog\_container\_definition](#module\_datadog\_container\_definition) | cloudposse/ecs-container-definition/aws | 0.58.1 | +| [datadog\_fluent\_bit\_container\_definition](#module\_datadog\_fluent\_bit\_container\_definition) | cloudposse/ecs-container-definition/aws | 0.58.1 | +| [datadog\_sidecar\_logs](#module\_datadog\_sidecar\_logs) | cloudposse/cloudwatch-logs/aws | 0.6.6 | +| [ecs\_alb\_service\_task](#module\_ecs\_alb\_service\_task) | cloudposse/ecs-alb-service-task/aws | 0.72.0 | | [ecs\_cloudwatch\_autoscaling](#module\_ecs\_cloudwatch\_autoscaling) | cloudposse/ecs-cloudwatch-autoscaling/aws | 0.7.3 | -| [ecs\_label](#module\_ecs\_label) | cloudposse/label/null | 0.25.0 | +| [ecs\_cloudwatch\_sns\_alarms](#module\_ecs\_cloudwatch\_sns\_alarms) | cloudposse/ecs-cloudwatch-sns-alarms/aws | 0.12.3 | +| [ecs\_cluster](#module\_ecs\_cluster) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [efs](#module\_efs) | 
cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [gha\_assume\_role](#module\_gha\_assume\_role) | ../account-map/modules/team-assume-role-policy | n/a | +| [gha\_role\_name](#module\_gha\_role\_name) | cloudposse/label/null | 0.25.0 | +| [iam\_role](#module\_iam\_role) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [logs](#module\_logs) | cloudposse/cloudwatch-logs/aws | 0.6.6 | -| [rds\_sg\_label](#module\_rds\_sg\_label) | cloudposse/label/null | 0.25.0 | +| [logs](#module\_logs) | cloudposse/cloudwatch-logs/aws | 0.6.8 | +| [nlb](#module\_nlb) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [rds](#module\_rds) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [roles\_to\_principals](#module\_roles\_to\_principals) | ../account-map/modules/roles-to-principals | n/a | +| [s3](#module\_s3) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [security\_group](#module\_security\_group) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [service\_domain](#module\_service\_domain) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | | [vanity\_alias](#module\_vanity\_alias) | cloudposse/route53-alias/aws | 0.13.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources | Name | Type | |------|------| | [aws_iam_policy.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | -| [aws_iam_role_policy_attachment.task](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role.github_actions](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | | [aws_kinesis_stream.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kinesis_stream) | resource | -| [aws_ecs_cluster.selected](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ecs_cluster) | data source | +| [aws_s3_bucket_object.task_definition_template](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_object) | resource | +| [aws_security_group_rule.custom_sg_rules](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_service_discovery_service.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/service_discovery_service) | resource | +| [aws_ssm_parameter.full_urls](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource | +| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | +| [aws_ecs_task_definition.created_task](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ecs_task_definition) | data source | +| [aws_iam_policy_document.github_actions_iam_ecspresso_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.github_actions_iam_platform_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| 
[aws_iam_policy_document.github_actions_iam_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_policy_document.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_kms_alias.selected](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/kms_alias) | data source | -| [aws_lb.selected](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/lb) | data source | -| [aws_lb_listener.selected_https](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/lb_listener) | data source | | [aws_route53_zone.selected](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/route53_zone) | data source | | [aws_route53_zone.selected_vanity](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/route53_zone) | data source | -| [aws_security_group.lb](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/security_group) | data source | -| [aws_security_group.rds](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/security_group) | data source | -| [aws_security_group.vpc_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/security_group) | data source | -| [aws_subnets.selected](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnets) | data source | -| [aws_vpc.selected](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc) | data source | +| [aws_s3_object.task_definition](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/s3_object) | data source | +| [aws_s3_objects.mirror](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/s3_objects) | data source | +| [aws_ssm_parameters_by_path.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameters_by_path) | data source | +| [jq_query.service_domain_query](https://registry.terraform.io/providers/massdriver-cloud/jq/latest/docs/data-sources/query) | data source | +| [template_file.envs](https://registry.terraform.io/providers/cloudposse/template/latest/docs/data-sources/file) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [account\_stage](#input\_account\_stage) | The ecr stage (account) name to use for the fully qualified stage parameter store. | `string` | `"auto"` | no | | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [additional\_targets](#input\_additional\_targets) | Additional target routes to add to the ALB that point to this service. The only difference between this and `var.vanity_alias` is `var.vanity_alias` will create an alias record in Route 53 in the hosted zone in this account as well. `var.additional_targets` only adds the listener route to this service's target group. | `list(string)` | `[]` | no | +| [alb\_configuration](#input\_alb\_configuration) | The configuration to use for the ALB, specifying which cluster alb configuration to use | `string` | `"default"` | no | +| [alb\_name](#input\_alb\_name) | The name of the ALB this service should attach to | `string` | `null` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [autoscaling\_dimension](#input\_autoscaling\_dimension) | The dimension to use to decide to autoscale | `string` | `"cpu"` | no | +| [autoscaling\_enabled](#input\_autoscaling\_enabled) | Should this service autoscale using SNS alarams | `bool` | `true` | no | +| [chamber\_service](#input\_chamber\_service) | SSM parameter service name for use with chamber. This is used in chamber\_format where /$chamber\_service/$name/$container\_name/$parameter would be the default. | `string` | `"ecs-service"` | no | | [cluster\_attributes](#input\_cluster\_attributes) | The attributes of the cluster name e.g. if the full name is `namespace-tenant-environment-dev-ecs-b2b` then the `cluster_name` is `ecs` and this value should be `b2b`. | `list(string)` | `[]` | no | -| [cluster\_full\_name](#input\_cluster\_full\_name) | The fully qualified name of the cluster. This will override the `cluster_suffix`. | `string` | `""` | no | -| [cluster\_name](#input\_cluster\_name) | The name of the cluster | `string` | `"ecs"` | no | -| [containers](#input\_containers) | Feed inputs into container definition module | `any` | `{}` | no | +| [containers](#input\_containers) | Feed inputs into container definition module |
map(object({
name = string
ecr_image = optional(string)
image = optional(string)
memory = optional(number)
memory_reservation = optional(number)
cpu = optional(number)
essential = optional(bool, true)
readonly_root_filesystem = optional(bool, null)
privileged = optional(bool, null)
container_depends_on = optional(list(object({
containerName = string
condition = string # START, COMPLETE, SUCCESS, HEALTHY
})), null)

port_mappings = optional(list(object({
containerPort = number
hostPort = optional(number)
protocol = optional(string)
name = optional(string)
appProtocol = optional(string)
})), [])
command = optional(list(string), null)
entrypoint = optional(list(string), null)
healthcheck = optional(object({
command = list(string)
interval = number
retries = number
startPeriod = number
timeout = number
}), null)
ulimits = optional(list(object({
name = string
softLimit = number
hardLimit = number
})), null)
log_configuration = optional(object({
logDriver = string
options = optional(map(string), {})
}))
docker_labels = optional(map(string), null)
map_environment = optional(map(string), {})
map_secrets = optional(map(string), {})
volumes_from = optional(list(object({
sourceContainer = string
readOnly = bool
})), null)
mount_points = optional(list(object({
sourceVolume = optional(string)
containerPath = optional(string)
readOnly = optional(bool)
})), [])
}))
| `{}` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [cpu\_utilization\_high\_alarm\_actions](#input\_cpu\_utilization\_high\_alarm\_actions) | A list of ARNs (i.e. SNS Topic ARN) to notify on CPU Utilization High Alarm action | `list(string)` | `[]` | no | +| [cpu\_utilization\_high\_evaluation\_periods](#input\_cpu\_utilization\_high\_evaluation\_periods) | Number of periods to evaluate for the alarm | `number` | `1` | no | +| [cpu\_utilization\_high\_ok\_actions](#input\_cpu\_utilization\_high\_ok\_actions) | A list of ARNs (i.e. SNS Topic ARN) to notify on CPU Utilization High OK action | `list(string)` | `[]` | no | +| [cpu\_utilization\_high\_period](#input\_cpu\_utilization\_high\_period) | Duration in seconds to evaluate for the alarm | `number` | `300` | no | +| [cpu\_utilization\_high\_threshold](#input\_cpu\_utilization\_high\_threshold) | The maximum percentage of CPU utilization average | `number` | `80` | no | +| [cpu\_utilization\_low\_alarm\_actions](#input\_cpu\_utilization\_low\_alarm\_actions) | A list of ARNs (i.e. SNS Topic ARN) to notify on CPU Utilization Low Alarm action | `list(string)` | `[]` | no | +| [cpu\_utilization\_low\_evaluation\_periods](#input\_cpu\_utilization\_low\_evaluation\_periods) | Number of periods to evaluate for the alarm | `number` | `1` | no | +| [cpu\_utilization\_low\_ok\_actions](#input\_cpu\_utilization\_low\_ok\_actions) | A list of ARNs (i.e. SNS Topic ARN) to notify on CPU Utilization Low OK action | `list(string)` | `[]` | no | +| [cpu\_utilization\_low\_period](#input\_cpu\_utilization\_low\_period) | Duration in seconds to evaluate for the alarm | `number` | `300` | no | +| [cpu\_utilization\_low\_threshold](#input\_cpu\_utilization\_low\_threshold) | The minimum percentage of CPU utilization average | `number` | `20` | no | +| [custom\_security\_group\_rules](#input\_custom\_security\_group\_rules) | The list of custom security group rules to add to the service security group |
list(object({
type = string
from_port = number
to_port = number
protocol = string
cidr_blocks = list(string)
description = optional(string)
}))
| `[]` | no | +| [datadog\_agent\_sidecar\_enabled](#input\_datadog\_agent\_sidecar\_enabled) | Enable the Datadog Agent Sidecar | `bool` | `false` | no | +| [datadog\_log\_method\_is\_firelens](#input\_datadog\_log\_method\_is\_firelens) | Datadog logs can be sent via cloudwatch logs (and lambda) or firelens, set this to true to enable firelens via a sidecar container for fluentbit | `bool` | `false` | no | +| [datadog\_logging\_default\_tags\_enabled](#input\_datadog\_logging\_default\_tags\_enabled) | Add Default tags to all logs sent to Datadog | `bool` | `true` | no | +| [datadog\_logging\_tags](#input\_datadog\_logging\_tags) | Tags to add to all logs sent to Datadog | `map(string)` | `null` | no | +| [datadog\_sidecar\_containers\_logs\_enabled](#input\_datadog\_sidecar\_containers\_logs\_enabled) | Enable the Datadog Agent Sidecar to send logs to aws cloudwatch group, requires `datadog_agent_sidecar_enabled` to be true | `bool` | `true` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | -| [domain\_name](#input\_domain\_name) | The domain name to use as the host header suffix | `string` | `""` | no | | [ecr\_region](#input\_ecr\_region) | The region to use for the fully qualified ECR image URL. Defaults to the current region. | `string` | `""` | no | | [ecr\_stage\_name](#input\_ecr\_stage\_name) | The ecr stage (account) name to use for the fully qualified ECR image URL. | `string` | `"auto"` | no | -| [ecs\_service\_enabled](#input\_ecs\_service\_enabled) | Whether to create the ECS service | `bool` | `true` | no | +| [ecs\_cluster\_name](#input\_ecs\_cluster\_name) | The name of the ECS Cluster this belongs to | `any` | `"ecs"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [exec\_enabled](#input\_exec\_enabled) | Specifies whether to enable Amazon ECS Exec for the tasks within the service | `bool` | `false` | no | +| [github\_actions\_allowed\_repos](#input\_github\_actions\_allowed\_repos) | A list of the GitHub repositories that are allowed to assume this role from GitHub Actions. For example,
["cloudposse/infra-live"]. Can contain "*" as wildcard.
If org part of repo name is omitted, "cloudposse" will be assumed. | `list(string)` | `[]` | no | +| [github\_actions\_ecspresso\_enabled](#input\_github\_actions\_ecspresso\_enabled) | Create IAM policies required for deployments with Ecspresso | `bool` | `false` | no | +| [github\_actions\_iam\_role\_attributes](#input\_github\_actions\_iam\_role\_attributes) | Additional attributes to add to the role name | `list(string)` | `[]` | no | +| [github\_actions\_iam\_role\_enabled](#input\_github\_actions\_iam\_role\_enabled) | Flag to toggle creation of an IAM Role that GitHub Actions can assume to access AWS resources | `bool` | `false` | no | +| [github\_oidc\_trusted\_role\_arns](#input\_github\_oidc\_trusted\_role\_arns) | A list of IAM Role ARNs allowed to assume this cluster's GitHub OIDC role | `list(string)` | `[]` | no | +| [health\_check\_healthy\_threshold](#input\_health\_check\_healthy\_threshold) | The number of consecutive health checks successes required before healthy | `number` | `2` | no | +| [health\_check\_interval](#input\_health\_check\_interval) | The duration in seconds in between health checks | `number` | `15` | no | +| [health\_check\_matcher](#input\_health\_check\_matcher) | The HTTP response codes to indicate a healthy check | `string` | `"200-404"` | no | +| [health\_check\_path](#input\_health\_check\_path) | The destination for the health check request | `string` | `"/health"` | no | +| [health\_check\_port](#input\_health\_check\_port) | The port to use to connect with the target. Valid values are either ports 1-65536, or `traffic-port`. Defaults to `traffic-port` | `string` | `"traffic-port"` | no | +| [health\_check\_timeout](#input\_health\_check\_timeout) | The amount of time to wait in seconds before failing a health check request | `number` | `10` | no | +| [health\_check\_unhealthy\_threshold](#input\_health\_check\_unhealthy\_threshold) | The number of consecutive health check failures required before unhealthy | `number` | `2` | no | +| [http\_protocol](#input\_http\_protocol) | Which http protocol to use in outputs and SSM url params. This value is ignored if a load balancer is not used. If it is `null`, the redirect value from the ALB determines the protocol. | `string` | `null` | no | | [iam\_policy\_enabled](#input\_iam\_policy\_enabled) | If set to true will create IAM policy in AWS | `bool` | `false` | no | | [iam\_policy\_statements](#input\_iam\_policy\_statements) | Map of IAM policy statements to use in the policy. This can be used with or instead of the `var.iam_source_json_url`. | `any` | `{}` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kinesis\_enabled](#input\_kinesis\_enabled) | Enable Kinesis | `bool` | `false` | no | +| [kms\_alias\_name\_ssm](#input\_kms\_alias\_name\_ssm) | KMS alias name for SSM | `string` | `"alias/aws/ssm"` | no | | [kms\_key\_alias](#input\_kms\_key\_alias) | ID of KMS key | `string` | `"default"` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | -| [lb\_match\_tags](#input\_lb\_match\_tags) | The additional matching tags for the LB data source. Used with current namespace, tenant, env, and stage tags. | `map(string)` | `{}` | no | +| [lb\_catch\_all](#input\_lb\_catch\_all) | Should this service act as catch all for all subdomain hosts of the vanity domain | `bool` | `false` | no | | [logs](#input\_logs) | Feed inputs into cloudwatch logs module | `any` | `{}` | no | +| [memory\_utilization\_high\_alarm\_actions](#input\_memory\_utilization\_high\_alarm\_actions) | A list of ARNs (i.e. SNS Topic ARN) to notify on Memory Utilization High Alarm action | `list(string)` | `[]` | no | +| [memory\_utilization\_high\_evaluation\_periods](#input\_memory\_utilization\_high\_evaluation\_periods) | Number of periods to evaluate for the alarm | `number` | `1` | no | +| [memory\_utilization\_high\_ok\_actions](#input\_memory\_utilization\_high\_ok\_actions) | A list of ARNs (i.e. SNS Topic ARN) to notify on Memory Utilization High OK action | `list(string)` | `[]` | no | +| [memory\_utilization\_high\_period](#input\_memory\_utilization\_high\_period) | Duration in seconds to evaluate for the alarm | `number` | `300` | no | +| [memory\_utilization\_high\_threshold](#input\_memory\_utilization\_high\_threshold) | The maximum percentage of Memory utilization average | `number` | `80` | no | +| [memory\_utilization\_low\_alarm\_actions](#input\_memory\_utilization\_low\_alarm\_actions) | A list of ARNs (i.e. SNS Topic ARN) to notify on Memory Utilization Low Alarm action | `list(string)` | `[]` | no | +| [memory\_utilization\_low\_evaluation\_periods](#input\_memory\_utilization\_low\_evaluation\_periods) | Number of periods to evaluate for the alarm | `number` | `1` | no | +| [memory\_utilization\_low\_ok\_actions](#input\_memory\_utilization\_low\_ok\_actions) | A list of ARNs (i.e. SNS Topic ARN) to notify on Memory Utilization Low OK action | `list(string)` | `[]` | no | +| [memory\_utilization\_low\_period](#input\_memory\_utilization\_low\_period) | Duration in seconds to evaluate for the alarm | `number` | `300` | no | +| [memory\_utilization\_low\_threshold](#input\_memory\_utilization\_low\_threshold) | The minimum percentage of Memory utilization average | `number` | `20` | no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | -| [public\_lb\_enabled](#input\_public\_lb\_enabled) | Whether or not to use public LB and public subnets | `bool` | `false` | no | +| [nlb\_name](#input\_nlb\_name) | The name of the NLB this service should attach to | `string` | `null` | no | +| [rds\_name](#input\_rds\_name) | The name of the RDS database this service should allow access to | `any` | `null` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | -| [retention\_period](#input\_retention\_period) | Length of time data records are accessible after they are added to the stream | `string` | `"48"` | no | -| [shard\_count](#input\_shard\_count) | Number of shards that the stream will use | `string` | `"1"` | no | -| [shard\_level\_metrics](#input\_shard\_level\_metrics) | List of shard-level CloudWatch metrics which can be enabled for the stream | `list` |
[
"IncomingBytes",
"IncomingRecords",
"IteratorAgeMilliseconds",
"OutgoingBytes",
"OutgoingRecords",
"ReadProvisionedThroughputExceeded",
"WriteProvisionedThroughputExceeded"
]
| no | +| [retention\_period](#input\_retention\_period) | Length of time data records are accessible after they are added to the stream | `number` | `48` | no | +| [s3\_mirror\_name](#input\_s3\_mirror\_name) | The name of the S3 mirror component | `string` | `null` | no | +| [service\_connect\_configurations](#input\_service\_connect\_configurations) | The list of Service Connect configurations.
See `service_connect_configuration` docs https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecs_service#service_connect_configuration |
list(object({
enabled = bool
namespace = optional(string, null)
log_configuration = optional(object({
log_driver = string
options = optional(map(string), null)
secret_option = optional(list(object({
name = string
value_from = string
})), [])
}), null)
service = optional(list(object({
client_alias = list(object({
dns_name = string
port = number
}))
discovery_name = optional(string, null)
ingress_port_override = optional(number, null)
port_name = string
})), [])
}))
| `[]` | no | +| [service\_registries](#input\_service\_registries) | The list of Service Registries.
See `service_registries` docs https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecs_service#service_registries |
list(object({
namespace = string
registry_arn = optional(string)
port = optional(number)
container_name = optional(string)
container_port = optional(number)
}))
| `[]` | no | +| [shard\_count](#input\_shard\_count) | Number of shards that the stream will use | `number` | `1` | no | +| [shard\_level\_metrics](#input\_shard\_level\_metrics) | List of shard-level CloudWatch metrics which can be enabled for the stream | `list(string)` |
[
"IncomingBytes",
"IncomingRecords",
"IteratorAgeMilliseconds",
"OutgoingBytes",
"OutgoingRecords",
"ReadProvisionedThroughputExceeded",
"WriteProvisionedThroughputExceeded"
]
| no | +| [ssm\_enabled](#input\_ssm\_enabled) | If `true` create SSM keys for the database user and password. | `bool` | `false` | no | +| [ssm\_key\_format](#input\_ssm\_key\_format) | SSM path format. The values will will be used in the following order: `var.ssm_key_prefix`, `var.name`, `var.ssm_key_*` | `string` | `"/%v/%v/%v"` | no | +| [ssm\_key\_prefix](#input\_ssm\_key\_prefix) | SSM path prefix. Omit the leading forward slash `/`. | `string` | `"ecs-service"` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [stickiness\_cookie\_duration](#input\_stickiness\_cookie\_duration) | The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds) | `number` | `86400` | no | +| [stickiness\_enabled](#input\_stickiness\_enabled) | Boolean to enable / disable `stickiness`. Default is `true` | `bool` | `true` | no | +| [stickiness\_type](#input\_stickiness\_type) | The type of sticky sessions. The only current possible value is `lb_cookie` | `string` | `"lb_cookie"` | no | | [stream\_mode](#input\_stream\_mode) | Stream mode details for the Kinesis stream | `string` | `"PROVISIONED"` | no | -| [subnet\_match\_tags](#input\_subnet\_match\_tags) | The additional matching tags for the VPC subnet data source. Used with current namespace, tenant, env, and stage tags. | `map(string)` | `{}` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | -| [task](#input\_task) | Feed inputs into ecs\_alb\_service\_task module | `any` | `{}` | no | +| [task](#input\_task) | Feed inputs into ecs\_alb\_service\_task module |
object({
task_cpu = optional(number)
task_memory = optional(number)
task_role_arn = optional(string, "")
pid_mode = optional(string, null)
ipc_mode = optional(string, null)
network_mode = optional(string)
propagate_tags = optional(string)
assign_public_ip = optional(bool, false)
use_alb_security_groups = optional(bool, true)
launch_type = optional(string, "FARGATE")
scheduling_strategy = optional(string, "REPLICA")
capacity_provider_strategies = optional(list(object({
capacity_provider = string
weight = number
base = number
})), [])

deployment_minimum_healthy_percent = optional(number, null)
deployment_maximum_percent = optional(number, null)
desired_count = optional(number, 0)
min_capacity = optional(number, 1)
max_capacity = optional(number, 2)
wait_for_steady_state = optional(bool, true)
circuit_breaker_deployment_enabled = optional(bool, true)
circuit_breaker_rollback_enabled = optional(bool, true)

ecs_service_enabled = optional(bool, true)
bind_mount_volumes = optional(list(object({
name = string
host_path = string
})), [])
efs_volumes = optional(list(object({
host_path = string
name = string
efs_volume_configuration = list(object({
file_system_id = string
root_directory = string
transit_encryption = string
transit_encryption_port = string
authorization_config = list(object({
access_point_id = string
iam = string
}))
}))
})), [])
efs_component_volumes = optional(list(object({
host_path = string
name = string
efs_volume_configuration = list(object({
component = optional(string, "efs")
tenant = optional(string, null)
environment = optional(string, null)
stage = optional(string, null)

root_directory = string
transit_encryption = string
transit_encryption_port = string
authorization_config = list(object({
access_point_id = string
iam = string
}))
}))
})), [])
docker_volumes = optional(list(object({
host_path = string
name = string
docker_volume_configuration = list(object({
autoprovision = bool
driver = string
driver_opts = map(string)
labels = map(string)
scope = string
}))
})), [])
fsx_volumes = optional(list(object({
host_path = string
name = string
fsx_windows_file_server_volume_configuration = list(object({
file_system_id = string
root_directory = string
authorization_config = list(object({
credentials_parameter = string
domain = string
}))
}))
})), [])
})
| `{}` | no | | [task\_enabled](#input\_task\_enabled) | Whether or not to use the ECS task module | `bool` | `true` | no | +| [task\_exec\_policy\_arns\_map](#input\_task\_exec\_policy\_arns\_map) | A map of name to IAM Policy ARNs to attach to the generated task execution role.
The names are arbitrary, but must be known at plan time. The purpose of the name
is so that changes to one ARN do not cause a ripple effect on the other ARNs.
If you cannot provide unique names known at plan time, use `task_exec_policy_arns` instead. | `map(string)` | `{}` | no | +| [task\_iam\_role\_component](#input\_task\_iam\_role\_component) | A component that outputs an iam\_role module as 'role' for adding to the service as a whole. | `string` | `null` | no | | [task\_policy\_arns](#input\_task\_policy\_arns) | The IAM policy ARNs to attach to the ECS task IAM role | `list(string)` |
[
"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
"arn:aws:iam::aws:policy/AmazonSSMReadOnlyAccess"
]
| no | +| [task\_security\_group\_component](#input\_task\_security\_group\_component) | A component that outputs security\_group\_id for adding to the service as a whole. | `string` | `null` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [unauthenticated\_paths](#input\_unauthenticated\_paths) | Unauthenticated path pattern to match | `list(string)` | `[]` | no | +| [unauthenticated\_priority](#input\_unauthenticated\_priority) | The priority for the rules without authentication, between 1 and 50000 (1 being highest priority). Must be different from `authenticated_priority` since a listener can't have multiple rules with the same priority | `string` | `0` | no | | [use\_lb](#input\_use\_lb) | Whether use load balancer for the service | `bool` | `false` | no | | [use\_rds\_client\_sg](#input\_use\_rds\_client\_sg) | Use the RDS client security group | `bool` | `false` | no | | [vanity\_alias](#input\_vanity\_alias) | The vanity aliases to use for the public LB. | `list(string)` | `[]` | no | -| [vpc\_match\_tags](#input\_vpc\_match\_tags) | The additional matching tags for the VPC data source. Used with current namespace, tenant, env, and stage tags. | `map(any)` | `{}` | no | +| [vanity\_domain](#input\_vanity\_domain) | Whether to use the vanity domain alias for the service | `string` | `null` | no | +| [zone\_component](#input\_zone\_component) | The component name to look up service domain remote-state on | `string` | `"dns-delegated"` | no | +| [zone\_component\_output](#input\_zone\_component\_output) | A json query to use to get the zone domain from the remote state. See | `string` | `".default_domain_name"` | no | ## Outputs | Name | Description | |------|-------------| -| [container\_definition](#output\_container\_definition) | Output of container definition module | | [ecs\_cluster\_arn](#output\_ecs\_cluster\_arn) | Selected ECS cluster ARN | +| [environment\_map](#output\_environment\_map) | Environment variables to pass to the container, this is a map of key/value pairs, where the key is `containerName,variableName` | | [full\_domain](#output\_full\_domain) | Domain to respond to GET requests | +| [github\_actions\_iam\_role\_arn](#output\_github\_actions\_iam\_role\_arn) | ARN of IAM role for GitHub Actions | +| [github\_actions\_iam\_role\_name](#output\_github\_actions\_iam\_role\_name) | Name of IAM role for GitHub Actions | | [lb\_arn](#output\_lb\_arn) | Selected LB ARN | | [lb\_listener\_https](#output\_lb\_listener\_https) | Selected LB HTTPS Listener | | [lb\_sg\_id](#output\_lb\_sg\_id) | Selected LB SG ID | | [logs](#output\_logs) | Output of cloudwatch logs module | +| [service\_image](#output\_service\_image) | The image of the service container | +| [ssm\_key\_prefix](#output\_ssm\_key\_prefix) | SSM prefix | +| [ssm\_parameters](#output\_ssm\_parameters) | SSM parameters for the ECS Service | | [subnet\_ids](#output\_subnet\_ids) | Selected subnet IDs | -| [task](#output\_task) | Output of service task module | +| [task\_definition\_arn](#output\_task\_definition\_arn) | The task definition ARN | +| [task\_definition\_revision](#output\_task\_definition\_revision) | The task definition revision | +| [task\_template](#output\_task\_template) | The task template rendered | | [vpc\_id](#output\_vpc\_id) | Selected VPC ID | | [vpc\_sg\_id](#output\_vpc\_sg\_id) | Selected VPC SG ID | + ## References -* 
[cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/ecs-service) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/ecs-service) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/ecs-service/cloud-map.tf b/modules/ecs-service/cloud-map.tf new file mode 100644 index 000000000..f5d169d47 --- /dev/null +++ b/modules/ecs-service/cloud-map.tf @@ -0,0 +1,66 @@ +// Service Connect + +module "cloudmap_namespace" { + for_each = { for service_connect in var.service_connect_configurations : service_connect.namespace => service_connect } + + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = each.key + + # we ignore errors because the namespace may be a name or an arn of a namespace for the service. + ignore_errors = true + context = module.this.context +} + +locals { + valid_cloudmap_namespaces = { for k, v in module.cloudmap_namespace : k => v if v.outputs != null } + service_connect_configurations = [for service_connect in var.service_connect_configurations : merge(service_connect, { namespace = try(local.valid_cloudmap_namespaces[service_connect.namespace].outputs.name, service_connect.namespace) })] +} +// ------------------------------ + +// Service Discovery + +module "cloudmap_namespace_service_discovery" { + for_each = { for service_connect in var.service_registries : service_connect.namespace => service_connect } + + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = each.key + + # we ignore errors because the namespace may be a name or an arn of a namespace for the service. 
+ ignore_errors = true + context = module.this.context +} + +locals { + valid_cloudmap_service_discovery_namespaces = { for k, v in module.cloudmap_namespace_service_discovery : k => v if v.outputs != null } + service_discovery_configurations = [for service_registry in var.service_registries : merge(service_registry, { namespace = try(local.valid_cloudmap_service_discovery_namespaces[service_registry.namespace].outputs.name, service_registry.namespace) })] + service_config_with_id = { for service_registry in var.service_registries : service_registry.namespace => merge(service_registry, { id = try(local.valid_cloudmap_service_discovery_namespaces[service_registry.namespace].outputs.id, null) }) } + service_discovery = [for value in var.service_registries : merge(value, { + registry_arn = aws_service_discovery_service.default[value.namespace].arn + })] +} + +resource "aws_service_discovery_service" "default" { + for_each = local.service_config_with_id + name = module.this.name + + dns_config { + namespace_id = each.value.id + + dns_records { + ttl = 10 + type = "A" + } + + routing_policy = "MULTIVALUE" + } + + health_check_custom_config { + failure_threshold = 1 + } +} + +// ------------------------------ diff --git a/modules/ecs-service/datadog-agent.tf b/modules/ecs-service/datadog-agent.tf new file mode 100644 index 000000000..6b654dc22 --- /dev/null +++ b/modules/ecs-service/datadog-agent.tf @@ -0,0 +1,156 @@ +variable "datadog_agent_sidecar_enabled" { + type = bool + default = false + description = "Enable the Datadog Agent Sidecar" +} + +variable "datadog_log_method_is_firelens" { + type = bool + default = false + description = "Datadog logs can be sent via cloudwatch logs (and lambda) or firelens, set this to true to enable firelens via a sidecar container for fluentbit" +} + +variable "datadog_sidecar_containers_logs_enabled" { + type = bool + default = true + description = "Enable the Datadog Agent Sidecar to send logs to aws cloudwatch group, requires `datadog_agent_sidecar_enabled` to be true" +} + +variable "datadog_logging_tags" { + type = map(string) + default = null + description = "Tags to add to all logs sent to Datadog" +} + +variable "datadog_logging_default_tags_enabled" { + type = bool + default = true + description = "Add Default tags to all logs sent to Datadog" +} + +locals { + default_datadog_tags = var.datadog_logging_default_tags_enabled ? { + env = module.this.stage + account = format("%s-%s-%s", module.this.tenant, module.this.environment, module.this.stage) + } : null + + all_dd_tags = join(",", [for k, v in merge(local.default_datadog_tags, var.datadog_logging_tags) : format("%s:%s", k, v)]) + + datadog_logconfiguration_firelens = { + logDriver = "awsfirelens" + options = var.datadog_agent_sidecar_enabled ? { + Name = "datadog", + apikey = one(module.datadog_configuration[*].outputs.datadog_api_key), + Host = format("http-intake.logs.%s", one(module.datadog_configuration[*].outputs.datadog_site)) + dd_service = module.this.name, + dd_tags = local.all_dd_tags, + dd_source = "ecs", + dd_message_key = "log", + TLS = "on", + provider = "ecs" + } : {} + } +} + +module "datadog_sidecar_logs" { + source = "cloudposse/cloudwatch-logs/aws" + version = "0.6.6" + + # if we are using datadog firelens we don't need to create a log group + count = local.enabled && var.datadog_agent_sidecar_enabled && var.datadog_sidecar_containers_logs_enabled ? 
1 : 0 + + stream_names = lookup(var.logs, "stream_names", []) + retention_in_days = lookup(var.logs, "retention_in_days", 90) + + principals = merge({ + Service = ["ecs.amazonaws.com", "ecs-tasks.amazonaws.com"] + }, lookup(var.logs, "principals", {})) + + additional_permissions = concat([ + "logs:CreateLogStream", + "logs:DeleteLogStream", + ], lookup(var.logs, "additional_permissions", [])) + + context = module.this.context +} + +module "datadog_container_definition" { + source = "cloudposse/ecs-container-definition/aws" + version = "0.58.1" + + count = local.enabled && var.datadog_agent_sidecar_enabled ? 1 : 0 + + container_cpu = 256 + container_memory = 512 + container_name = "datadog-agent" + container_image = "public.ecr.aws/datadog/agent:latest" + essential = true + map_environment = { + "ECS_FARGATE" = var.task.launch_type == "FARGATE" ? true : false + "DD_API_KEY" = one(module.datadog_configuration[*].outputs.datadog_api_key) + "DD_SITE" = one(module.datadog_configuration[*].outputs.datadog_site) + "DD_ENV" = module.this.stage + "DD_LOGS_ENABLED" = true + "DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL" = true + "SD_BACKEND" = "docker" + "DD_PROCESS_AGENT_ENABLED" = true + "DD_DOGSTATSD_NON_LOCAL_TRAFFIC" = true + "DD_APM_ENABLED" = true + "DD_CONTAINER_LABELS_AS_TAGS" = jsonencode({ + "org.opencontainers.image.revision" = "version" + }) + } + + // Datadog DogStatsD/tracing ports + port_mappings = [{ + containerPort = 8125 + hostPort = 8125 + protocol = "udp" + }, { + containerPort = 8126 + hostPort = 8126 + protocol = "tcp" + }] + + log_configuration = var.datadog_sidecar_containers_logs_enabled ? { + logDriver = "awslogs" + options = { + "awslogs-group" = one(module.datadog_sidecar_logs[*].log_group_name) + "awslogs-region" = var.region + "awslogs-stream-prefix" = "datadog-agent" + } + } : null +} + +module "datadog_fluent_bit_container_definition" { + source = "cloudposse/ecs-container-definition/aws" + version = "0.58.1" + + count = local.enabled && var.datadog_agent_sidecar_enabled ? 1 : 0 + + container_cpu = 256 + container_memory = 512 + container_name = "datadog-log-router" + # From Datadog Support: + # In this case, the newest container image with the latest tag (corresponding to version 2.29.0) looks like it is crashing for certain customers, which is causing the Task to deprovision. + # Note: We recommend customers to use the stable tag for this type of reason + container_image = "amazon/aws-for-fluent-bit:stable" + essential = true + firelens_configuration = { + type = "fluentbit" + options = { + config-file-type = "file", + config-file-value = "/fluent-bit/configs/parse-json.conf", + enable-ecs-log-metadata = "true" + } + } + + log_configuration = var.datadog_sidecar_containers_logs_enabled ? 
{ + logDriver = "awslogs" + options = { + "awslogs-group" = one(module.datadog_sidecar_logs[*].log_group_name) + "awslogs-region" = var.region + "awslogs-stream-prefix" = "datadog-log-router" + } + } : null +} diff --git a/modules/ecs-service/github-actions-iam-policy.tf b/modules/ecs-service/github-actions-iam-policy.tf new file mode 100644 index 000000000..1500ce8aa --- /dev/null +++ b/modules/ecs-service/github-actions-iam-policy.tf @@ -0,0 +1,176 @@ +variable "github_oidc_trusted_role_arns" { + type = list(string) + description = "A list of IAM Role ARNs allowed to assume this cluster's GitHub OIDC role" + default = [] +} + +variable "github_actions_ecspresso_enabled" { + type = bool + description = "Create IAM policies required for deployments with Ecspresso" + default = false +} + +locals { + github_actions_iam_policy = data.aws_iam_policy_document.github_actions_iam_policy.json +} + +data "aws_iam_policy_document" "github_actions_iam_policy" { + source_policy_documents = compact([ + data.aws_iam_policy_document.github_actions_iam_platform_policy.json, + join("", data.aws_iam_policy_document.github_actions_iam_ecspresso_policy[*]["json"]) + ]) +} + +data "aws_iam_policy_document" "github_actions_iam_platform_policy" { + # Allows trusted roles to assume this role + dynamic "statement" { + for_each = length(var.github_oidc_trusted_role_arns) == 0 ? [] : ["enabled"] + content { + sid = "TrustedRoleAccess" + effect = "Allow" + actions = [ + "sts:AssumeRole", + "sts:TagSession" + ] + resources = var.github_oidc_trusted_role_arns + } + } + + # Allow chamber to read secrets + statement { + sid = "AllowKMSAccess" + effect = "Allow" + actions = [ + "kms:Decrypt", + "kms:DescribeKey" + ] + #bridgecrew:skip=BC_AWS_IAM_57:OK Allow to Decrypt with any key. + resources = [ + "*" + ] + } + + statement { + effect = "Allow" + actions = [ + "ssm:GetParameters", + "ssm:GetParameter", + "ssm:PutParameter" + ] + resources = concat([ + "arn:aws:ssm:*:*:parameter${format("/%s/%s/*", var.chamber_service, var.name)}" + ], formatlist("arn:aws:ssm:*:*:parameter%s", keys(local.url_params))) + } + + statement { + effect = "Allow" + actions = [ + "ssm:DescribeParameters", + "ssm:GetParametersByPath" + ] + #bridgecrew:skip=BC_AWS_IAM_57:OK Allow to read from any ssm parameter store for chamber. + resources = [ + "*" + ] + } +} + +data "aws_caller_identity" "current" {} + +locals { + aws_partition = module.iam_roles.aws_partition + account_id = data.aws_caller_identity.current.account_id +} + +data "aws_iam_policy_document" "github_actions_iam_ecspresso_policy" { + count = var.github_actions_ecspresso_enabled ? 
1 : 0 + + statement { + effect = "Allow" + actions = [ + "ecs:DescribeServices", + "ecs:UpdateService", + "ecs:ListTagsForResource" + ] + resources = [ + join("", module.ecs_alb_service_task[*]["service_arn"]) + ] + } + + statement { + effect = "Allow" + actions = [ + "ecs:RunTask", + ] + resources = [ + format("arn:%s:ecs:%s:%s:task-definition/%s:*", local.aws_partition, var.region, local.account_id, join("", module.ecs_alb_service_task.*.task_definition_family)), + ] + } + + statement { + effect = "Allow" + actions = [ + "ecs:RegisterTaskDefinition", + "ecs:TagResource", + "ecs:DescribeTaskDefinition", + "ecs:DescribeTasks", + "application-autoscaling:DescribeScalableTargets" + ] + resources = [ + "*" + ] + } + + statement { + sid = "logs" + effect = "Allow" + actions = [ + "logs:Describe*", + "logs:Get*", + "logs:List*", + "logs:StartQuery", + "logs:StopQuery", + "logs:TestMetricFilter", + "logs:FilterLogEvents", + "oam:ListSinks" + ] + resources = [ + "*" + ] + } + + statement { + effect = "Allow" + actions = [ + "iam:PassRole" + ] + resources = [ + join("", module.ecs_alb_service_task[*]["task_exec_role_arn"]), + join("", module.ecs_alb_service_task[*]["task_role_arn"]), + ] + } + + dynamic "statement" { + for_each = local.s3_mirroring_enabled ? ["enabled"] : [] + content { + effect = "Allow" + actions = ["s3:ListBucket"] + resources = ["*"] + } + } + + dynamic "statement" { + for_each = local.s3_mirroring_enabled ? ["enabled"] : [] + content { + effect = "Allow" + actions = [ + "s3:PutObject", + "s3:GetObject", + "s3:HeadObject", + ] + resources = [ + format("%s/%s/%s/*", lookup(module.s3[0].outputs, "bucket_arn", null), module.ecs_cluster.outputs.cluster_name, module.this.id) + ] + } + } +} diff --git a/modules/ecs-service/github-actions-iam-role.mixin.tf b/modules/ecs-service/github-actions-iam-role.mixin.tf new file mode 100644 index 000000000..de68c6602 --- /dev/null +++ b/modules/ecs-service/github-actions-iam-role.mixin.tf @@ -0,0 +1,72 @@ +# This mixin requires that a local variable named `github_actions_iam_policy` be defined +# and its value to be a JSON IAM Policy Document defining the permissions for the role. +# It also requires that the `github-oidc-provider` has been previously installed and the +# `github-assume-role-policy.mixin.tf` has been added to `account-map/modules/team-assume-role-policy`. + +variable "github_actions_iam_role_enabled" { + type = bool + description = <<-EOF + Flag to toggle creation of an IAM Role that GitHub Actions can assume to access AWS resources + EOF + default = false +} + +variable "github_actions_allowed_repos" { + type = list(string) + description = < 0 +} + +module "gha_role_name" { + source = "cloudposse/label/null" + version = "0.25.0" + + enabled = local.github_actions_iam_role_enabled + attributes = compact(concat(var.github_actions_iam_role_attributes, ["gha"])) + + context = module.this.context +} + +module "gha_assume_role" { + source = "../account-map/modules/team-assume-role-policy" + + trusted_github_repos = var.github_actions_allowed_repos + + context = module.gha_role_name.context +} + +resource "aws_iam_role" "github_actions" { + count = local.github_actions_iam_role_enabled ? 
1 : 0 + name = module.gha_role_name.id + assume_role_policy = module.gha_assume_role.github_assume_role_policy + + inline_policy { + name = module.gha_role_name.id + policy = local.github_actions_iam_policy + } +} + +output "github_actions_iam_role_arn" { + value = one(aws_iam_role.github_actions[*].arn) + description = "ARN of IAM role for GitHub Actions" +} + +output "github_actions_iam_role_name" { + value = one(aws_iam_role.github_actions[*].name) + description = "Name of IAM role for GitHub Actions" +} diff --git a/modules/ecs-service/main.tf b/modules/ecs-service/main.tf index 005c3d79c..33914deaa 100644 --- a/modules/ecs-service/main.tf +++ b/modules/ecs-service/main.tf @@ -2,25 +2,91 @@ locals { enabled = module.this.enabled + s3_mirroring_enabled = local.enabled && try(length(var.s3_mirror_name) > 0, false) + service_container = lookup(var.containers, "service") # Get the first containerPort in var.container["service"]["port_mappings"] - container_port = lookup(local.service_container, "port_mappings")[0].containerPort + container_port = try(lookup(local.service_container, "port_mappings")[0].containerPort, null) - assign_public_ip = lookup(var.task, "assign_public_ip", false) + assign_public_ip = lookup(local.task, "assign_public_ip", false) - container_definition = [ + container_definition = concat([ for container in module.container_definition : container.json_map_object + ], + [ + for container in module.datadog_container_definition : + container.json_map_object + ], + var.datadog_log_method_is_firelens ? [ + for container in module.datadog_fluent_bit_container_definition : + container.json_map_object + ] : [], + ) + + kinesis_kms_id = try(one(data.aws_kms_alias.selected[*].id), null) + + use_alb_security_group = local.is_alb ? lookup(local.task, "use_alb_security_group", true) : false + + task_definition_s3_key = format("%s/%s/task-definition.json", module.ecs_cluster.outputs.cluster_name, module.this.id) + task_definition_use_s3 = local.enabled && local.s3_mirroring_enabled && contains(flatten(data.aws_s3_objects.mirror[*].keys), local.task_definition_s3_key) + task_definition_s3_objects = flatten(data.aws_s3_objects.mirror[*].keys) + + task_definition_s3 = try(jsondecode(data.aws_s3_object.task_definition[0].body), {}) + + task_s3 = local.task_definition_use_s3 ? 
{ + launch_type = try(local.task_definition_s3.requiresCompatibilities[0], null) + network_mode = lookup(local.task_definition_s3, "networkMode", null) + task_memory = try(tonumber(lookup(local.task_definition_s3, "memory")), null) + task_cpu = try(tonumber(lookup(local.task_definition_s3, "cpu")), null) + } : {} + + task = merge(var.task, local.task_s3) + + efs_component_volumes = lookup(local.task, "efs_component_volumes", []) + efs_component_map = { + for efs in local.efs_component_volumes : efs["name"] => efs + } + efs_component_remote_state = { + for efs in local.efs_component_volumes : efs["name"] => module.efs[efs["name"]].outputs + } + efs_component_merged = [ + for efs_volume_name, efs_component_output in local.efs_component_remote_state : { + host_path = local.efs_component_map[efs_volume_name].host_path + name = efs_volume_name + efs_volume_configuration = [ + #again this is a hardcoded array because AWS does not support multiple configurations per volume + { + file_system_id = efs_component_output.efs_id + root_directory = local.efs_component_map[efs_volume_name].efs_volume_configuration[0].root_directory + transit_encryption = local.efs_component_map[efs_volume_name].efs_volume_configuration[0].transit_encryption + transit_encryption_port = local.efs_component_map[efs_volume_name].efs_volume_configuration[0].transit_encryption_port + authorization_config = local.efs_component_map[efs_volume_name].efs_volume_configuration[0].authorization_config + } + ] + } ] + efs_volumes = concat(lookup(local.task, "efs_volumes", []), local.efs_component_merged) +} - role_name = format("%s-%s-%s-%s-%s-role", var.namespace, var.tenant, var.environment, var.stage, var.name) +data "aws_s3_objects" "mirror" { + count = local.s3_mirroring_enabled ? 1 : 0 + bucket = lookup(module.s3[0].outputs, "bucket_id", null) + prefix = format("%s/%s", module.ecs_cluster.outputs.cluster_name, module.this.id) +} - kinesis_kms_id = try(one(data.aws_kms_alias.selected[*].id), null) +data "aws_s3_object" "task_definition" { + count = local.task_definition_use_s3 ? 1 : 0 + bucket = lookup(module.s3[0].outputs, "bucket_id", null) + key = try(element(local.task_definition_s3_objects, index(local.task_definition_s3_objects, local.task_definition_s3_key)), null) } module "logs" { source = "cloudposse/cloudwatch-logs/aws" - version = "0.6.6" + version = "0.6.8" + + # if we are using datadog firelens we don't need to create a log group + count = local.enabled && (!var.datadog_agent_sidecar_enabled || !var.datadog_log_method_is_firelens) ? 
1 : 0 stream_names = lookup(var.logs, "stream_names", []) retention_in_days = lookup(var.logs, "retention_in_days", 90) @@ -37,65 +103,160 @@ module "logs" { context = module.this.context } +module "roles_to_principals" { + source = "../account-map/modules/roles-to-principals" + context = module.this.context + role_map = {} +} + +locals { + container_chamber = { + for name, result in data.aws_ssm_parameters_by_path.default : + name => { for key, value in zipmap(result.names, result.values) : element(reverse(split("/", key)), 0) => value } + } + + container_aliases = { + for name, settings in var.containers : + settings["name"] => name if local.enabled + } + + container_s3 = { + for item in lookup(local.task_definition_s3, "containerDefinitions", []) : + local.container_aliases[item.name] => { container_definition = item } + } + + containers_priority_terraform = { + for name, settings in var.containers : + name => merge(local.container_chamber[name], lookup(local.container_s3, name, {}), settings, ) + if local.enabled + } + containers_priority_s3 = { + for name, settings in var.containers : + name => merge(settings, local.container_chamber[name], lookup(local.container_s3, name, {})) + if local.enabled + } +} + +data "aws_ssm_parameters_by_path" "default" { + for_each = { for k, v in var.containers : k => v if local.enabled } + path = format("/%s/%s/%s", var.chamber_service, var.name, each.key) +} + +locals { + containers_envs = merge([ + for name, settings in var.containers : + { for k, v in lookup(settings, "map_environment", {}) : "${name},${k}" => v if local.enabled } + ]...) +} + + +data "template_file" "envs" { + for_each = { for k, v in local.containers_envs : k => v if local.enabled } + + template = replace(each.value, "$$", "$") + + vars = { + stage = module.this.stage + namespace = module.this.namespace + name = module.this.name + full_domain = local.full_domain + vanity_domain = var.vanity_domain + # `service_domain` uses whatever the current service is (public/private) + service_domain = local.domain_no_service_name + service_domain_public = local.public_domain_no_service_name + service_domain_private = local.private_domain_no_service_name + } +} + +locals { + env_map_subst = { + for k, v in data.template_file.envs : + k => v.rendered + } + map_secrets = { for k, v in local.containers_priority_terraform : k => lookup(v, "map_secrets", null) != null ? zipmap( + keys(lookup(v, "map_secrets", null)), + formatlist("%s/%s", format("arn:aws:ssm:%s:%s:parameter", var.region, module.roles_to_principals.full_account_map[format("%s-%s", var.tenant, var.stage)]), + values(lookup(v, "map_secrets", null))) + ) : null } +} + module "container_definition" { source = "cloudposse/ecs-container-definition/aws" - version = "0.58.1" + version = "0.61.1" - for_each = var.containers + for_each = { for k, v in local.containers_priority_terraform : k => v if local.enabled } - container_name = lookup(each.value, "name") + container_name = each.value["name"] container_image = lookup(each.value, "ecr_image", null) != null ? 
format( "%s.dkr.ecr.%s.amazonaws.com/%s", - module.iam_roles.account_map.full_account_map[var.ecr_stage_name], + module.roles_to_principals.full_account_map[var.ecr_stage_name], coalesce(var.ecr_region, var.region), - lookup(each.value, "ecr_image", null), - ) : lookup(each.value, "image") + lookup(local.containers_priority_s3[each.key], "ecr_image", null) + ) : lookup(local.containers_priority_s3[each.key], "image") - container_memory = lookup(each.value, "memory", null) - container_memory_reservation = lookup(each.value, "memory_reservation", null) - container_cpu = lookup(each.value, "cpu", null) - essential = lookup(each.value, "essential", true) - readonly_root_filesystem = lookup(each.value, "readonly_root_filesystem", null) + container_memory = each.value["memory"] + container_memory_reservation = each.value["memory_reservation"] + container_cpu = each.value["cpu"] + essential = each.value["essential"] + readonly_root_filesystem = each.value["readonly_root_filesystem"] + mount_points = each.value["mount_points"] map_environment = lookup(each.value, "map_environment", null) != null ? merge( - lookup(each.value, "map_environment", {}), + { for k, v in local.env_map_subst : split(",", k)[1] => v if split(",", k)[0] == each.key }, { "APP_ENV" = format("%s-%s-%s-%s", var.namespace, var.tenant, var.environment, var.stage) }, { "RUNTIME_ENV" = format("%s-%s-%s", var.namespace, var.tenant, var.stage) }, - { "CLUSTER_NAME" = try(one(data.aws_ecs_cluster.selected[*].cluster_name), null) } + { "CLUSTER_NAME" = module.ecs_cluster.outputs.cluster_name }, + var.datadog_agent_sidecar_enabled ? { + "DD_DOGSTATSD_PORT" = 8125, + "DD_TRACING_ENABLED" = "true", + "DD_SERVICE_NAME" = var.name, + "DD_ENV" = var.stage, + "DD_PROFILING_EXPORTERS" = "agent" + } : {}, + lookup(each.value, "map_environment", null) ) : null - map_secrets = lookup(each.value, "map_secrets", null) != null ? zipmap( - keys(lookup(each.value, "map_secrets", null)), - formatlist("%s/%s", format("arn:aws:ssm:%s:%s:parameter", - coalesce(var.ecr_region, var.region), module.iam_roles.account_map.full_account_map[format("%s-%s", var.tenant, var.stage)]), - values(lookup(each.value, "map_secrets", null))) - ) : null - port_mappings = lookup(each.value, "port_mappings", []) - command = lookup(each.value, "command", null) - entrypoint = lookup(each.value, "entrypoint", null) - healthcheck = lookup(each.value, "healthcheck", null) - ulimits = lookup(each.value, "ulimits", null) - volumes_from = lookup(each.value, "volumes_from", null) - - log_configuration = lookup(each.value["log_configuration"], "logDriver", {}) == "awslogs" ? merge(lookup(each.value, "log_configuration", {}), { - options = { - "awslogs-region" = var.region - "awslogs-group" = module.logs.log_group_name - "awslogs-stream-prefix" = var.name - } - }) : lookup(each.value, "log_configuration", {}) + map_secrets = local.map_secrets[each.key] + + port_mappings = each.value["port_mappings"] + command = each.value["command"] + entrypoint = each.value["entrypoint"] + healthcheck = each.value["healthcheck"] + ulimits = each.value["ulimits"] + volumes_from = each.value["volumes_from"] + docker_labels = each.value["docker_labels"] + container_depends_on = each.value["container_depends_on"] + privileged = each.value["privileged"] + + log_configuration = lookup(lookup(each.value, "log_configuration", {}), "logDriver", {}) == "awslogs" ? 
merge(lookup(each.value, "log_configuration", {}), { + logDriver = "awslogs" + options = tomap({ + awslogs-region = var.region, + awslogs-group = local.awslogs_group, + awslogs-stream-prefix = coalesce(each.value["name"], each.key), + }) + # if we are not using awslogs, we execute this line, which if we have dd enabled, means we are using firelens, so merge that config in. + }) : merge(lookup(each.value, "log_configuration", {}), local.datadog_logconfiguration_firelens) + firelens_configuration = lookup(each.value, "firelens_configuration", null) + # escape hatch for anything not specifically described above or unsupported by the upstream module - container_definition = lookup(each.value, "container_definition", {}) + # March 2024: Removing this as it always prioritizes the s3 task definition + # container_definition = lookup(each.value, "container_definition", {}) +} + +locals { + awslogs_group = var.datadog_log_method_is_firelens ? "" : join("", module.logs[*].log_group_name) + external_security_group = try(module.security_group[*].outputs.security_group_id, []) } module "ecs_alb_service_task" { source = "cloudposse/ecs-alb-service-task/aws" - version = "0.66.0" + version = "0.72.0" - count = var.enabled ? 1 : 0 + count = local.enabled ? 1 : 0 ecs_cluster_arn = local.ecs_cluster_arn vpc_id = local.vpc_id @@ -104,88 +265,114 @@ module "ecs_alb_service_task" { container_definition_json = jsonencode(local.container_definition) # This is set to true to allow ingress from the ALB sg - use_alb_security_group = lookup(var.task, "use_alb_security_group", true) + use_alb_security_group = local.use_alb_security_group container_port = local.container_port alb_security_group = local.lb_sg_id - security_group_ids = compact([local.vpc_sg_id, local.rds_sg_id]) + security_group_ids = compact(concat([local.vpc_sg_id, local.rds_sg_id], local.external_security_group)) + + nlb_cidr_blocks = local.is_nlb ? [module.vpc.outputs.vpc_cidr] : [] + nlb_container_port = local.is_nlb ? local.container_port : 80 + use_nlb_cidr_blocks = local.is_nlb # See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecs_service#load_balancer - ecs_load_balancers = var.use_lb ? [ + ecs_load_balancers = local.use_lb ? [ { container_name = lookup(local.service_container, "name"), container_port = local.container_port, - target_group_arn = module.alb_ingress[0].target_group_arn + target_group_arn = local.is_alb ? 
module.alb_ingress[0].target_group_arn : local.nlb.default_target_group_arn # not required since elb is unused but must be set to null elb_name = null }, ] : [] assign_public_ip = local.assign_public_ip - ignore_changes_task_definition = lookup(var.task, "ignore_changes_task_definition", false) - ignore_changes_desired_count = lookup(var.task, "ignore_changes_desired_count", true) - launch_type = lookup(var.task, "launch_type", "FARGATE") - network_mode = lookup(var.task, "network_mode", "awsvpc") - propagate_tags = lookup(var.task, "propagate_tags", "SERVICE") - deployment_minimum_healthy_percent = lookup(var.task, "deployment_minimum_healthy_percent", null) - deployment_maximum_percent = lookup(var.task, "deployment_maximum_percent", null) - deployment_controller_type = lookup(var.task, "deployment_controller_type", null) - desired_count = lookup(var.task, "desired_count", 0) - task_memory = lookup(var.task, "task_memory", null) - task_cpu = lookup(var.task, "task_cpu", null) - wait_for_steady_state = lookup(var.task, "wait_for_steady_state", true) - circuit_breaker_deployment_enabled = lookup(var.task, "circuit_breaker_deployment_enabled", true) - circuit_breaker_rollback_enabled = lookup(var.task, "circuit_breaker_rollback_enabled ", true) - task_policy_arns = tolist(aws_iam_policy.default[*].arn) - ecs_service_enabled = lookup(var.task, "ecs_service_enabled", true) + ignore_changes_task_definition = lookup(local.task, "ignore_changes_task_definition", false) + ignore_changes_desired_count = lookup(local.task, "ignore_changes_desired_count", true) + launch_type = lookup(local.task, "launch_type", "FARGATE") + scheduling_strategy = lookup(local.task, "scheduling_strategy", "REPLICA") + network_mode = lookup(local.task, "network_mode", "awsvpc") + pid_mode = local.task["pid_mode"] + ipc_mode = local.task["ipc_mode"] + propagate_tags = lookup(local.task, "propagate_tags", "SERVICE") + deployment_minimum_healthy_percent = lookup(local.task, "deployment_minimum_healthy_percent", null) + deployment_maximum_percent = lookup(local.task, "deployment_maximum_percent", null) + deployment_controller_type = lookup(local.task, "deployment_controller_type", null) + desired_count = lookup(local.task, "desired_count", 0) + task_memory = lookup(local.task, "task_memory", null) + task_cpu = lookup(local.task, "task_cpu", null) + wait_for_steady_state = lookup(local.task, "wait_for_steady_state", true) + circuit_breaker_deployment_enabled = lookup(local.task, "circuit_breaker_deployment_enabled", true) + circuit_breaker_rollback_enabled = lookup(local.task, "circuit_breaker_rollback_enabled", true) + task_policy_arns = var.iam_policy_enabled ? 
concat(var.task_policy_arns, aws_iam_policy.default[*].arn) : var.task_policy_arns + ecs_service_enabled = lookup(local.task, "ecs_service_enabled", true) + task_role_arn = lookup(local.task, "task_role_arn", one(module.iam_role[*]["outputs"]["role"]["arn"])) + capacity_provider_strategies = lookup(local.task, "capacity_provider_strategies") + + task_exec_policy_arns_map = var.task_exec_policy_arns_map + + efs_volumes = local.efs_volumes + docker_volumes = lookup(local.task, "docker_volumes", []) + fsx_volumes = lookup(local.task, "fsx_volumes", []) + bind_mount_volumes = lookup(local.task, "bind_mount_volumes", []) + + exec_enabled = var.exec_enabled + service_connect_configurations = local.service_connect_configurations + service_registries = local.service_discovery + depends_on = [ + module.alb_ingress + ] context = module.this.context } -# This resource is used instead of the ecs_alb_service_task module's `var.task_policy_arns` because -# the upstream module uses a "count" instead of a "for_each" -# -# See https://github.com/cloudposse/terraform-aws-ecs-alb-service-task/issues/167 -resource "aws_iam_role_policy_attachment" "task" { - for_each = local.enabled && length(var.task_policy_arns) > 0 ? toset(var.task_policy_arns) : toset([]) - - policy_arn = each.value - role = try(one(module.ecs_alb_service_task[*].task_role_name), null) -} - - -module "alb_ecs_label" { - source = "cloudposse/label/null" - version = "0.25.0" # requires Terraform >= 0.13.0 - - namespace = "" - environment = "" - tenant = "" - stage = "" - - context = module.this.context +resource "aws_security_group_rule" "custom_sg_rules" { + for_each = local.enabled && var.custom_security_group_rules != [] ? { + for sg_rule in var.custom_security_group_rules : + format("%s_%s_%s", sg_rule.protocol, sg_rule.from_port, sg_rule.to_port) => sg_rule + } : {} + description = each.value.description + type = each.value.type + from_port = each.value.from_port + to_port = each.value.to_port + protocol = each.value.protocol + cidr_blocks = each.value.cidr_blocks + security_group_id = one(module.ecs_alb_service_task[*].service_security_group_id) } module "alb_ingress" { source = "cloudposse/alb-ingress/aws" - version = "0.24.3" - - count = var.use_lb ? 1 : 0 + version = "0.28.0" - target_group_name = module.alb_ecs_label.id + count = local.is_alb ? 1 : 0 vpc_id = local.vpc_id unauthenticated_listener_arns = [local.lb_listener_https_arn] - unauthenticated_hosts = [local.full_domain] - unauthenticated_priority = 0 - default_target_group_enabled = true - health_check_matcher = "200-404" + unauthenticated_hosts = var.lb_catch_all ? [format("*.%s", var.vanity_domain), local.full_domain] : concat([ + local.full_domain + ], var.vanity_alias, var.additional_targets) + unauthenticated_paths = flatten(var.unauthenticated_paths) + # When set to catch-all, make priority super high to make sure last to match + unauthenticated_priority = var.lb_catch_all ? 
99 : var.unauthenticated_priority + default_target_group_enabled = true + + health_check_matcher = var.health_check_matcher + health_check_path = var.health_check_path + health_check_port = var.health_check_port + health_check_healthy_threshold = var.health_check_healthy_threshold + health_check_unhealthy_threshold = var.health_check_unhealthy_threshold + health_check_interval = var.health_check_interval + health_check_timeout = var.health_check_timeout + + stickiness_enabled = var.stickiness_enabled + stickiness_type = var.stickiness_type + stickiness_cookie_duration = var.stickiness_cookie_duration context = module.this.context } data "aws_iam_policy_document" "this" { - count = var.iam_policy_enabled ? 1 : 0 + count = local.enabled && var.iam_policy_enabled ? 1 : 0 dynamic "statement" { # Only flatten if a list(string) is passed in, otherwise use the map var as-is @@ -233,8 +420,10 @@ data "aws_iam_policy_document" "this" { } resource "aws_iam_policy" "default" { - count = local.enabled && var.iam_policy_enabled ? 1 : 0 - policy = join("", data.aws_iam_policy_document.this.*.json) + count = local.enabled && var.iam_policy_enabled ? 1 : 0 + + name = format("%s-task-access", module.this.id) + policy = join("", data.aws_iam_policy_document.this[*]["json"]) tags_all = module.this.tags } @@ -242,6 +431,8 @@ module "vanity_alias" { source = "cloudposse/route53-alias/aws" version = "0.13.0" + count = local.enabled ? 1 : 0 + aliases = var.vanity_alias parent_zone_id = local.vanity_domain_zone_id target_dns_name = local.lb_name @@ -254,12 +445,12 @@ module "ecs_cloudwatch_autoscaling" { source = "cloudposse/ecs-cloudwatch-autoscaling/aws" version = "0.7.3" - count = var.task_enabled ? 1 : 0 + count = local.enabled && var.task_enabled && var.autoscaling_enabled ? 1 : 0 service_name = module.ecs_alb_service_task[0].service_name - cluster_name = try(one(data.aws_ecs_cluster.selected[*].cluster_name), null) - min_capacity = lookup(var.task, "min_capacity", 1) - max_capacity = lookup(var.task, "max_capacity", 2) + cluster_name = module.ecs_cluster.outputs.cluster_name + min_capacity = lookup(local.task, "min_capacity", 1) + max_capacity = lookup(local.task, "max_capacity", 2) scale_up_adjustment = 1 scale_up_cooldown = 60 scale_down_adjustment = -1 @@ -272,8 +463,86 @@ module "ecs_cloudwatch_autoscaling" { ] } +locals { + scale_up_policy_arn = try(module.ecs_cloudwatch_autoscaling[0].scale_up_policy_arn, "") + scale_down_policy_arn = try(module.ecs_cloudwatch_autoscaling[0].scale_down_policy_arn, "") + + cpu_utilization_high_alarm_actions = var.autoscaling_enabled && var.autoscaling_dimension == "cpu" ? local.scale_up_policy_arn : "" + cpu_utilization_low_alarm_actions = var.autoscaling_enabled && var.autoscaling_dimension == "cpu" ? local.scale_down_policy_arn : "" + memory_utilization_high_alarm_actions = var.autoscaling_enabled && var.autoscaling_dimension == "memory" ? local.scale_up_policy_arn : "" + memory_utilization_low_alarm_actions = var.autoscaling_enabled && var.autoscaling_dimension == "memory" ? local.scale_down_policy_arn : "" +} + +module "ecs_cloudwatch_sns_alarms" { + source = "cloudposse/ecs-cloudwatch-sns-alarms/aws" + version = "0.12.3" + count = local.enabled && var.autoscaling_enabled ? 
1 : 0 + + cluster_name = module.ecs_cluster.outputs.cluster_name + service_name = module.ecs_alb_service_task[0].service_name + + cpu_utilization_high_threshold = var.cpu_utilization_high_threshold + cpu_utilization_high_evaluation_periods = var.cpu_utilization_high_evaluation_periods + cpu_utilization_high_period = var.cpu_utilization_high_period + + cpu_utilization_high_alarm_actions = compact( + concat( + var.cpu_utilization_high_alarm_actions, + [local.cpu_utilization_high_alarm_actions], + ) + ) + + cpu_utilization_high_ok_actions = var.cpu_utilization_high_ok_actions + + cpu_utilization_low_threshold = var.cpu_utilization_low_threshold + cpu_utilization_low_evaluation_periods = var.cpu_utilization_low_evaluation_periods + cpu_utilization_low_period = var.cpu_utilization_low_period + + cpu_utilization_low_alarm_actions = compact( + concat( + var.cpu_utilization_low_alarm_actions, + [local.cpu_utilization_low_alarm_actions], + ) + ) + + cpu_utilization_low_ok_actions = var.cpu_utilization_low_ok_actions + + memory_utilization_high_threshold = var.memory_utilization_high_threshold + memory_utilization_high_evaluation_periods = var.memory_utilization_high_evaluation_periods + memory_utilization_high_period = var.memory_utilization_high_period + + memory_utilization_high_alarm_actions = compact( + concat( + var.memory_utilization_high_alarm_actions, + [local.memory_utilization_high_alarm_actions], + ) + ) + + memory_utilization_high_ok_actions = var.memory_utilization_high_ok_actions + + memory_utilization_low_threshold = var.memory_utilization_low_threshold + memory_utilization_low_evaluation_periods = var.memory_utilization_low_evaluation_periods + memory_utilization_low_period = var.memory_utilization_low_period + + memory_utilization_low_alarm_actions = compact( + concat( + var.memory_utilization_low_alarm_actions, + [local.memory_utilization_low_alarm_actions], + ) + ) + + memory_utilization_low_ok_actions = var.memory_utilization_low_ok_actions + + context = module.this.context + + depends_on = [ + module.ecs_alb_service_task + ] +} + resource "aws_kinesis_stream" "default" { - count = local.enabled && var.kinesis_enabled ? 1 : 0 + count = local.enabled && var.kinesis_enabled ? 1 : 0 + name = format("%s-%s", module.this.id, "kinesis-stream") shard_count = var.shard_count retention_period = var.retention_period @@ -291,3 +560,35 @@ resource "aws_kinesis_stream" "default" { ] } } + +data "aws_ecs_task_definition" "created_task" { + count = local.s3_mirroring_enabled ? 1 : 0 + task_definition = module.ecs_alb_service_task[0].task_definition_family + depends_on = [ + module.ecs_alb_service_task + ] +} + +locals { + created_task_definition = local.s3_mirroring_enabled ? data.aws_ecs_task_definition.created_task[0] : {} + task_template = local.s3_mirroring_enabled ? { + containerDefinitions = local.container_definition + family = lookup(local.created_task_definition, "family", null), + taskRoleArn = lookup(local.created_task_definition, "task_role_arn", null), + executionRoleArn = lookup(local.created_task_definition, "execution_role_arn", null), + networkMode = lookup(local.created_task_definition, "network_mode", null), + # we explicitly do not put the volumes here. 
That should be merged in by GHA + requiresCompatibilities = [lookup(local.task, "launch_type", "FARGATE")] + cpu = tostring(lookup(local.task, "task_cpu", null)) + memory = tostring(lookup(local.task, "task_memory", null)) + + } : null +} + +resource "aws_s3_bucket_object" "task_definition_template" { + count = local.s3_mirroring_enabled ? 1 : 0 + bucket = lookup(module.s3[0].outputs, "bucket_id", null) + key = format("%s/%s/task-template.json", module.ecs_cluster.outputs.cluster_name, module.this.id) + content = jsonencode(local.task_template) + server_side_encryption = "AES256" +} diff --git a/modules/ecs-service/outputs.tf b/modules/ecs-service/outputs.tf index ce2609d1f..aeb99f783 100644 --- a/modules/ecs-service/outputs.tf +++ b/modules/ecs-service/outputs.tf @@ -1,18 +1,8 @@ output "logs" { - value = module.logs + value = one(module.logs[*]) description = "Output of cloudwatch logs module" } -output "container_definition" { - value = local.container_definition - description = "Output of container definition module" -} - -output "task" { - value = module.ecs_alb_service_task - description = "Output of service task module" -} - output "ecs_cluster_arn" { value = local.ecs_cluster_arn description = "Selected ECS cluster ARN" @@ -52,3 +42,28 @@ output "full_domain" { value = local.full_domain description = "Domain to respond to GET requests" } + +output "environment_map" { + value = local.env_map_subst + description = "Environment variables to pass to the container, this is a map of key/value pairs, where the key is `containerName,variableName`" +} + +output "service_image" { + value = try(nonsensitive(local.containers_priority_terraform.service.image), null) + description = "The image of the service container" +} + +output "task_template" { + value = local.s3_mirroring_enabled ? jsondecode(nonsensitive(jsonencode(local.task_template))) : null + description = "The task template rendered" +} + +output "task_definition_arn" { + value = one(module.ecs_alb_service_task[*].task_definition_arn) + description = "The task definition ARN" +} + +output "task_definition_revision" { + value = one(module.ecs_alb_service_task[*].task_definition_revision) + description = "The task definition revision" +} diff --git a/modules/ecs-service/providers.tf b/modules/ecs-service/providers.tf index 84a1dea80..ef923e10a 100644 --- a/modules/ecs-service/providers.tf +++ b/modules/ecs-service/providers.tf @@ -1,29 +1,19 @@ -module "iam_roles" { - source = "../account-map/modules/iam-roles" - - context = module.this.context -} - provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_role_arn) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context } diff --git a/modules/ecs-service/remote-state.tf b/modules/ecs-service/remote-state.tf index dc066edc9..a32de427f 100644 --- a/modules/ecs-service/remote-state.tf +++ b/modules/ecs-service/remote-state.tf @@ -1,164 +1,158 @@ locals { - # Grab only namespace, tenant, environment, stage since those will be the common tags across resources of interest in this account - match_tags = { - for key, value in module.this.tags : - key => value - if contains(["namespace", "tenant", "environment", "stage"], lower(key)) - } - - subnet_match_tags = merge({ - Attributes = local.assign_public_ip ? "public" : "private" - }, var.subnet_match_tags) - - lb_match_tags = merge({ - # e.g. platform-public - Attributes = format("%s-%s", local.cluster_type, local.domain_type) - }, var.lb_match_tags) - - vpc_id = try(one(data.aws_vpc.selected[*].id), null) - vpc_sg_id = try(one(data.aws_security_group.vpc_default[*].id), null) - rds_sg_id = try(one(data.aws_security_group.rds[*].id), null) - subnet_ids = try(one(data.aws_subnets.selected[*].ids), null) - ecs_cluster_arn = try(one(data.aws_ecs_cluster.selected[*].arn), null) - - lb_arn = try(one(data.aws_lb.selected[*].arn), null) - lb_name = try(one(data.aws_lb.selected[*].name), null) - lb_listener_https_arn = try(one(data.aws_lb_listener.selected_https[*].arn), null) - lb_sg_id = try(one(data.aws_security_group.lb[*].id), null) - lb_zone_id = try(one(data.aws_lb.selected[*].zone_id), null) + vpc_id = module.vpc.outputs.vpc_id + vpc_sg_id = module.vpc.outputs.vpc_default_security_group_id + rds_sg_id = try(one(module.rds[*].outputs.exports.security_groups.client), null) + subnet_ids = lookup(module.vpc.outputs.subnets, local.assign_public_ip ? "public" : "private", { ids = [] }).ids + ecs_cluster_arn = module.ecs_cluster.outputs.cluster_arn + + use_external_lb = local.use_lb && (try(length(var.alb_name) > 0, false) || try(length(var.nlb_name) > 0, false)) + + is_alb = local.use_lb && !try(length(var.nlb_name) > 0, false) + alb = local.use_lb ? (local.use_external_lb ? try(module.alb[0].outputs, null) : module.ecs_cluster.outputs.alb[var.alb_configuration]) : null + + is_nlb = local.use_lb && try(length(var.nlb_name) > 0, false) + nlb = try(module.nlb[0].outputs, null) + + use_lb = local.enabled && var.use_lb + + requested_protocol = local.use_lb && !local.lb_listener_http_is_redirect ? var.http_protocol : null + lb_protocol = local.lb_listener_http_is_redirect || try(local.is_nlb && local.nlb.is_443_enabled, false) ? "https" : "http" + http_protocol = coalesce(local.requested_protocol, local.lb_protocol) + + lb_arn = try(coalesce(local.nlb.nlb_arn, ""), coalesce(local.alb.alb_arn, ""), null) + lb_name = try(coalesce(local.nlb.nlb_name, ""), coalesce(local.alb.alb_dns_name, ""), null) + lb_listener_http_is_redirect = try(length(local.is_nlb ? 
"" : local.alb.http_redirect_listener_arn) > 0, false) + lb_listener_https_arn = try(coalesce(local.nlb.default_listener_arn, ""), coalesce(local.alb.https_listener_arn, ""), null) + lb_sg_id = try(local.is_nlb ? null : local.alb.security_group_id, null) + lb_zone_id = try(coalesce(local.nlb.nlb_zone_id, ""), coalesce(local.alb.alb_zone_id, ""), null) + lb_fqdn = try(coalesce(local.nlb.route53_record.fqdn, ""), coalesce(local.alb.route53_record.fqdn, ""), local.full_domain) + } ## Company specific locals for domain convention locals { - domain_name = { - tenantexample = "example.net", - } - zone_domain = format("%s.%s.%s", var.stage, var.tenant, coalesce(var.domain_name, local.domain_name[var.tenant])) + domain_type = var.alb_configuration + cluster_type = try(var.cluster_attributes[0], "platform") - domain_type = var.public_lb_enabled ? "public" : "private" - cluster_type = var.cluster_attributes[0] + zone_domain = jsondecode(data.jq_query.service_domain_query.result) # e.g. example.public-platform.{environment}.{zone_domain} - full_domain = format("%s.%s-%s.%s.%s", var.name, local.domain_type, local.cluster_type, var.environment, local.zone_domain) - - # tenant to domain mapping - vanity_domain_names = { - tenantexample = { - "dev" = "example-dev.com", - "staging" = "example-staging.com", - "prod" = "example-prod.com", - }, - } + full_domain = format("%s.%s-%s.%s.%s", join("-", concat([ + var.name + ], var.attributes)), local.domain_type, local.cluster_type, var.environment, local.zone_domain) + domain_no_service_name = format("%s-%s.%s.%s", local.domain_type, local.cluster_type, var.environment, local.zone_domain) + public_domain_no_service_name = format("%s-%s.%s.%s", "public", local.cluster_type, var.environment, local.zone_domain) + private_domain_no_service_name = format("%s-%s.%s.%s", "private", local.cluster_type, var.environment, local.zone_domain) - vanity_domain = local.vanity_domain_names[var.tenant][var.stage] - vanity_domain_zone_id = try(one(data.aws_route53_zone.selected_vanity[*].zone_id), null) -} + vanity_domain_zone_id = one(data.aws_route53_zone.selected_vanity[*].zone_id) -variable "vpc_match_tags" { - type = map(any) - description = "The additional matching tags for the VPC data source. Used with current namespace, tenant, env, and stage tags." - default = {} -} + unauthenticated_paths = local.is_nlb ? ["/"] : var.unauthenticated_paths -variable "subnet_match_tags" { - type = map(string) - description = "The additional matching tags for the VPC subnet data source. Used with current namespace, tenant, env, and stage tags." - default = {} -} + # NOTE: this is the rare _not_ in the ternary purely for readability + full_urls = !local.use_lb ? [] : [for path in local.unauthenticated_paths : format("%s://%s%s", local.http_protocol, local.lb_fqdn, trimsuffix(trimsuffix(path, "*"), "/"))] -variable "lb_match_tags" { - type = map(string) - description = "The additional matching tags for the LB data source. Used with current namespace, tenant, env, and stage tags." - default = {} } -data "aws_vpc" "selected" { - count = local.enabled ? 1 : 0 +module "vpc" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" - default = false + component = "vpc" - tags = merge(local.match_tags, var.vpc_match_tags) + context = module.this.context } -data "aws_security_group" "vpc_default" { - count = local.enabled ? 1 : 0 - - name = "default" +module "security_group" { + count = local.enabled && var.task_security_group_component != null ? 
1 : 0 + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" - vpc_id = local.vpc_id + component = var.task_security_group_component - tags = local.match_tags + context = module.this.context } -data "aws_subnets" "selected" { - count = local.enabled ? 1 : 0 +module "rds" { + count = local.enabled && var.use_rds_client_sg && try(length(var.rds_name), 0) > 0 ? 1 : 0 + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" - filter { - name = "vpc-id" - values = [local.vpc_id] - } + component = var.rds_name - tags = merge(local.match_tags, local.subnet_match_tags) + context = module.this.context } -module "ecs_label" { - source = "cloudposse/label/null" - version = "0.25.0" +module "ecs_cluster" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" - name = var.cluster_name - attributes = var.cluster_attributes + component = coalesce(var.ecs_cluster_name, "ecs-cluster") context = module.this.context } -module "rds_sg_label" { - source = "cloudposse/label/null" - version = "0.25.0" +module "alb" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" - name = var.kms_key_alias - attributes = ["client"] + count = local.is_alb && local.use_external_lb ? 1 : 0 + + component = var.alb_name context = module.this.context } -data "aws_security_group" "rds" { - count = local.enabled && var.use_rds_client_sg ? 1 : 0 +module "nlb" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + count = local.is_nlb ? 1 : 0 - vpc_id = local.vpc_id + component = var.nlb_name - tags = { - "Name" = module.rds_sg_label.id - } + context = module.this.context } -data "aws_ecs_cluster" "selected" { - count = local.enabled ? 1 : 0 +module "s3" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + count = local.s3_mirroring_enabled ? 1 : 0 + + component = var.s3_mirror_name - cluster_name = coalesce(var.cluster_full_name, module.ecs_label.id) + context = module.this.context } -data "aws_security_group" "lb" { - count = local.enabled ? 1 : 0 - vpc_id = local.vpc_id +module "service_domain" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" - tags = merge(local.match_tags, local.lb_match_tags) -} + component = var.zone_component -data "aws_lb" "selected" { - count = local.enabled ? 1 : 0 + context = module.this.context + environment = "gbl" +} - tags = merge(local.match_tags, local.lb_match_tags) +data "jq_query" "service_domain_query" { + data = jsonencode(one(module.service_domain[*].outputs)) + query = var.zone_component_output } -data "aws_lb_listener" "selected_https" { - count = local.enabled ? 1 : 0 +module "datadog_configuration" { + count = var.datadog_agent_sidecar_enabled ? 1 : 0 + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" - load_balancer_arn = local.lb_arn - port = 443 + component = "datadog_keys" + + context = module.this.context } + # This is purely a check to ensure this zone exists +# tflint-ignore: terraform_unused_declarations data "aws_route53_zone" "selected" { count = local.enabled ? 1 : 0 @@ -167,9 +161,9 @@ data "aws_route53_zone" "selected" { } data "aws_route53_zone" "selected_vanity" { - count = local.enabled ? 1 : 0 + count = local.enabled && var.vanity_domain != null ? 
1 : 0 - name = local.vanity_domain + name = var.vanity_domain private_zone = false } @@ -177,3 +171,30 @@ data "aws_kms_alias" "selected" { count = local.enabled && var.kinesis_enabled ? 1 : 0 name = format("alias/%s", coalesce(var.kms_key_alias, var.name)) } + +module "iam_role" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + count = local.enabled && var.task_iam_role_component != null ? 1 : 0 + + component = var.task_iam_role_component + + context = module.this.context +} + +module "efs" { + for_each = local.efs_component_map + + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + # Here we can use [0] because aws only allows one efs volume configuration per volume + component = each.value.efs_volume_configuration[0].component + + context = module.this.context + + tenant = each.value.efs_volume_configuration[0].tenant + stage = each.value.efs_volume_configuration[0].stage + environment = each.value.efs_volume_configuration[0].environment + +} diff --git a/modules/ecs-service/systems-manager.tf b/modules/ecs-service/systems-manager.tf new file mode 100644 index 000000000..5a40afa07 --- /dev/null +++ b/modules/ecs-service/systems-manager.tf @@ -0,0 +1,70 @@ +# AWS KMS alias used for encryption/decryption of SSM secure strings +variable "kms_alias_name_ssm" { + type = string + default = "alias/aws/ssm" + description = "KMS alias name for SSM" +} + +variable "ssm_enabled" { + type = bool + default = false + description = "If `true`, create SSM parameters for this service (such as the service URLs)." +} + +variable "ssm_key_format" { + type = string + default = "/%v/%v/%v" + description = "SSM path format. The values will be used in the following order: `var.ssm_key_prefix`, `var.name`, `var.ssm_key_*`" +} + +variable "ssm_key_prefix" { + type = string + default = "ecs-service" + description = "SSM path prefix. Omit the leading forward slash `/`." +} + +locals { + ssm_enabled = module.this.enabled && var.ssm_enabled + + url_params = { for i, url in local.full_urls : format(var.ssm_key_format, var.ssm_key_prefix, var.name, "url/${i}") => { + description = "ECS Service URL for ${var.name}" + type = "String", + value = url + } + } + + params = merge({}, local.url_params) + + # Use the format for any other params we need to create + # params = { + # "${format(var.ssm_key_format, var.ssm_key_prefix, var.name, "name")}" = { + # description = "ECS Service [name here] for ${var.name}" + # type = "String", + # value = "some value" + # }, + # } +} + +resource "aws_ssm_parameter" "full_urls" { + for_each = local.ssm_enabled ? local.params : {} + + name = each.key + description = each.value.description + type = each.value.type + key_id = var.kms_alias_name_ssm + value = each.value.value + overwrite = true + + tags = module.this.tags +} + + +output "ssm_key_prefix" { + value = local.ssm_enabled ? format(var.ssm_key_format, var.ssm_key_prefix, var.name, "") : null + description = "SSM prefix" +} + +output "ssm_parameters" { + description = "SSM parameters for the ECS Service" + value = local.ssm_enabled ?
keys(local.params) : [] +} diff --git a/modules/ecs-service/variables.tf b/modules/ecs-service/variables.tf index 085fa4212..55e06af68 100644 --- a/modules/ecs-service/variables.tf +++ b/modules/ecs-service/variables.tf @@ -9,32 +9,189 @@ variable "cluster_attributes" { default = [] } -variable "cluster_name" { - type = string - description = "The name of the cluster" +variable "logs" { + type = any + description = "Feed inputs into cloudwatch logs module" + default = {} +} + +variable "ecs_cluster_name" { + type = any + description = "The name of the ECS Cluster this belongs to" default = "ecs" } -variable "cluster_full_name" { +variable "alb_name" { type = string - description = "The fully qualified name of the cluster. This will override the `cluster_suffix`." - default = "" + description = "The name of the ALB this service should attach to" + default = null } -variable "logs" { +variable "nlb_name" { + type = string + description = "The name of the NLB this service should attach to" + default = null +} + +variable "s3_mirror_name" { + type = string + description = "The name of the S3 mirror component" + default = null +} + +variable "rds_name" { type = any - description = "Feed inputs into cloudwatch logs module" - default = {} + description = "The name of the RDS database this service should allow access to" + default = null } variable "containers" { - type = any + type = map(object({ + name = string + ecr_image = optional(string) + image = optional(string) + memory = optional(number) + memory_reservation = optional(number) + cpu = optional(number) + essential = optional(bool, true) + readonly_root_filesystem = optional(bool, null) + privileged = optional(bool, null) + container_depends_on = optional(list(object({ + containerName = string + condition = string # START, COMPLETE, SUCCESS, HEALTHY + })), null) + + port_mappings = optional(list(object({ + containerPort = number + hostPort = optional(number) + protocol = optional(string) + name = optional(string) + appProtocol = optional(string) + })), []) + command = optional(list(string), null) + entrypoint = optional(list(string), null) + healthcheck = optional(object({ + command = list(string) + interval = number + retries = number + startPeriod = number + timeout = number + }), null) + ulimits = optional(list(object({ + name = string + softLimit = number + hardLimit = number + })), null) + log_configuration = optional(object({ + logDriver = string + options = optional(map(string), {}) + })) + docker_labels = optional(map(string), null) + map_environment = optional(map(string), {}) + map_secrets = optional(map(string), {}) + volumes_from = optional(list(object({ + sourceContainer = string + readOnly = bool + })), null) + mount_points = optional(list(object({ + sourceVolume = optional(string) + containerPath = optional(string) + readOnly = optional(bool) + })), []) + })) description = "Feed inputs into container definition module" default = {} } variable "task" { - type = any + type = object({ + task_cpu = optional(number) + task_memory = optional(number) + task_role_arn = optional(string, "") + pid_mode = optional(string, null) + ipc_mode = optional(string, null) + network_mode = optional(string) + propagate_tags = optional(string) + assign_public_ip = optional(bool, false) + use_alb_security_groups = optional(bool, true) + launch_type = optional(string, "FARGATE") + scheduling_strategy = optional(string, "REPLICA") + capacity_provider_strategies = optional(list(object({ + capacity_provider = string + weight = number + base = number + 
})), []) + + deployment_minimum_healthy_percent = optional(number, null) + deployment_maximum_percent = optional(number, null) + desired_count = optional(number, 0) + min_capacity = optional(number, 1) + max_capacity = optional(number, 2) + wait_for_steady_state = optional(bool, true) + circuit_breaker_deployment_enabled = optional(bool, true) + circuit_breaker_rollback_enabled = optional(bool, true) + + ecs_service_enabled = optional(bool, true) + bind_mount_volumes = optional(list(object({ + name = string + host_path = string + })), []) + efs_volumes = optional(list(object({ + host_path = string + name = string + efs_volume_configuration = list(object({ + file_system_id = string + root_directory = string + transit_encryption = string + transit_encryption_port = string + authorization_config = list(object({ + access_point_id = string + iam = string + })) + })) + })), []) + efs_component_volumes = optional(list(object({ + host_path = string + name = string + efs_volume_configuration = list(object({ + component = optional(string, "efs") + tenant = optional(string, null) + environment = optional(string, null) + stage = optional(string, null) + + root_directory = string + transit_encryption = string + transit_encryption_port = string + authorization_config = list(object({ + access_point_id = string + iam = string + })) + })) + })), []) + docker_volumes = optional(list(object({ + host_path = string + name = string + docker_volume_configuration = list(object({ + autoprovision = bool + driver = string + driver_opts = map(string) + labels = map(string) + scope = string + })) + })), []) + fsx_volumes = optional(list(object({ + host_path = string + name = string + fsx_windows_file_server_volume_configuration = list(object({ + file_system_id = string + root_directory = string + authorization_config = list(object({ + credentials_parameter = string + domain = string + })) + })) + })), []) + }) description = "Feed inputs into ecs_alb_service_task module" default = {} } @@ -48,16 +205,16 @@ variable "task_policy_arns" { ] } -variable "domain_name" { - type = string - description = "The domain name to use as the host header suffix" - default = "" +variable "unauthenticated_paths" { + type = list(string) + description = "Unauthenticated path pattern to match" + default = [] } -variable "public_lb_enabled" { - type = bool - description = "Whether or not to use public LB and public subnets" - default = false +variable "unauthenticated_priority" { + type = string + description = "The priority for the rules without authentication, between 1 and 50000 (1 being highest priority). Must be different from `authenticated_priority` since a listener can't have multiple rules with the same priority " + default = 0 } variable "task_enabled" { @@ -78,12 +235,6 @@ variable "ecr_region" { default = "" } -variable "account_stage" { - type = string - description = "The ecr stage (account) name to use for the fully qualified stage parameter store." - default = "auto" -} - variable "iam_policy_statements" { type = any description = "Map of IAM policy statements to use in the policy. This can be used with or instead of the `var.iam_source_json_url`." @@ -102,6 +253,12 @@ variable "vanity_alias" { default = [] } +variable "additional_targets" { + type = list(string) + description = "Additional target routes to add to the ALB that point to this service. The only difference between this and `var.vanity_alias` is `var.vanity_alias` will create an alias record in Route 53 in the hosted zone in this account as well. 
`var.additional_targets` only adds the listener route to this service's target group." + default = [] +} + variable "kinesis_enabled" { type = bool description = "Enable Kinesis" @@ -110,17 +267,19 @@ variable "kinesis_enabled" { variable "shard_count" { description = "Number of shards that the stream will use" - default = "1" + type = number + default = 1 } variable "retention_period" { description = "Length of time data records are accessible after they are added to the stream" - default = "48" + type = number + default = 48 } variable "shard_level_metrics" { description = "List of shard-level CloudWatch metrics which can be enabled for the stream" - + type = list(string) default = [ "IncomingBytes", "IncomingRecords", @@ -134,6 +293,7 @@ variable "shard_level_metrics" { variable "kms_key_alias" { description = "ID of KMS key" + type = string default = "default" } @@ -143,6 +303,17 @@ variable "use_lb" { type = bool } +variable "http_protocol" { + description = "Which http protocol to use in outputs and SSM url params. This value is ignored if a load balancer is not used. If it is `null`, the redirect value from the ALB determines the protocol." + default = null + type = string + + validation { + condition = anytrue([var.http_protocol == null, try(contains(["https", "http"], var.http_protocol), false)]) + error_message = "Allowed values: `http`, `https`, and `null`." + } +} + variable "stream_mode" { description = "Stream mode details for the Kinesis stream" default = "PROVISIONED" @@ -155,8 +326,322 @@ variable "use_rds_client_sg" { default = false } -variable "ecs_service_enabled" { +variable "chamber_service" { + default = "ecs-service" + type = string + description = "SSM parameter service name for use with chamber. This is used in chamber_format where /$chamber_service/$name/$container_name/$parameter would be the default." +} + +variable "zone_component" { + type = string + description = "The component name to look up service domain remote-state on" + default = "dns-delegated" +} + +variable "zone_component_output" { + type = string + description = "A json query to use to get the zone domain from the remote state. See " + default = ".default_domain_name" +} + +variable "vanity_domain" { + default = null + type = string + description = "Whether to use the vanity domain alias for the service" +} + +variable "alb_configuration" { + type = string + description = "The configuration to use for the ALB, specifying which cluster alb configuration to use" + default = "default" +} + +variable "health_check_path" { + type = string + description = "The destination for the health check request" + default = "/health" +} + +variable "health_check_port" { + type = string + default = "traffic-port" + description = "The port to use to connect with the target. Valid values are either ports 1-65536, or `traffic-port`. 
Defaults to `traffic-port`" +} + +variable "health_check_timeout" { + type = number + default = 10 + description = "The amount of time to wait in seconds before failing a health check request" +} + +variable "health_check_healthy_threshold" { + type = number + default = 2 + description = "The number of consecutive health check successes required before healthy" +} + +variable "health_check_unhealthy_threshold" { + type = number + default = 2 + description = "The number of consecutive health check failures required before unhealthy" +} + +variable "health_check_interval" { + type = number + default = 15 + description = "The duration in seconds in between health checks" +} + +variable "health_check_matcher" { + type = string + default = "200-404" + description = "The HTTP response codes to indicate a healthy check" +} + +variable "lb_catch_all" { + type = bool + description = "Should this service act as catch all for all subdomain hosts of the vanity domain" + default = false +} + +variable "stickiness_type" { + type = string + default = "lb_cookie" + description = "The type of sticky sessions. The only current possible value is `lb_cookie`" +} + +variable "stickiness_cookie_duration" { + type = number + default = 86400 + description = "The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds)" +} + +variable "stickiness_enabled" { + type = bool default = true + description = "Boolean to enable / disable `stickiness`. Default is `true`" +} + +variable "autoscaling_enabled" { type = bool - description = "Whether to create the ECS service" + default = true + description = "Should this service autoscale using SNS alarms" +} + +variable "autoscaling_dimension" { + type = string + description = "The dimension to use to decide to autoscale" + default = "cpu" + + validation { + condition = contains(["cpu", "memory"], var.autoscaling_dimension) + error_message = "Allowed values for autoscaling_dimension are \"cpu\" or \"memory\"." + } +} + +variable "cpu_utilization_high_threshold" { + type = number + description = "The maximum percentage of CPU utilization average" + default = 80 +} + +variable "cpu_utilization_high_evaluation_periods" { + type = number + description = "Number of periods to evaluate for the alarm" + default = 1 +} + +variable "cpu_utilization_high_period" { + type = number + description = "Duration in seconds to evaluate for the alarm" + default = 300 +} + +variable "cpu_utilization_high_alarm_actions" { + type = list(string) + description = "A list of ARNs (i.e. SNS Topic ARN) to notify on CPU Utilization High Alarm action" + default = [] +} + +variable "cpu_utilization_high_ok_actions" { + type = list(string) + description = "A list of ARNs (i.e.
SNS Topic ARN) to notify on CPU Utilization High OK action" + default = [] +} + +variable "cpu_utilization_low_threshold" { + type = number + description = "The minimum percentage of CPU utilization average" + default = 20 +} + +variable "cpu_utilization_low_evaluation_periods" { + type = number + description = "Number of periods to evaluate for the alarm" + default = 1 +} + +variable "cpu_utilization_low_period" { + type = number + description = "Duration in seconds to evaluate for the alarm" + default = 300 +} + +variable "cpu_utilization_low_alarm_actions" { + type = list(string) + description = "A list of ARNs (i.e. SNS Topic ARN) to notify on CPU Utilization Low Alarm action" + default = [] +} + +variable "cpu_utilization_low_ok_actions" { + type = list(string) + description = "A list of ARNs (i.e. SNS Topic ARN) to notify on CPU Utilization Low OK action" + default = [] +} + +variable "memory_utilization_high_threshold" { + type = number + description = "The maximum percentage of Memory utilization average" + default = 80 +} + +variable "memory_utilization_high_evaluation_periods" { + type = number + description = "Number of periods to evaluate for the alarm" + default = 1 +} + +variable "memory_utilization_high_period" { + type = number + description = "Duration in seconds to evaluate for the alarm" + default = 300 +} + +variable "memory_utilization_high_alarm_actions" { + type = list(string) + description = "A list of ARNs (i.e. SNS Topic ARN) to notify on Memory Utilization High Alarm action" + default = [] +} + +variable "memory_utilization_high_ok_actions" { + type = list(string) + description = "A list of ARNs (i.e. SNS Topic ARN) to notify on Memory Utilization High OK action" + default = [] +} + +variable "memory_utilization_low_threshold" { + type = number + description = "The minimum percentage of Memory utilization average" + default = 20 +} + +variable "memory_utilization_low_evaluation_periods" { + type = number + description = "Number of periods to evaluate for the alarm" + default = 1 +} + +variable "memory_utilization_low_period" { + type = number + description = "Duration in seconds to evaluate for the alarm" + default = 300 +} + +variable "memory_utilization_low_alarm_actions" { + type = list(string) + description = "A list of ARNs (i.e. SNS Topic ARN) to notify on Memory Utilization Low Alarm action" + default = [] +} + +variable "memory_utilization_low_ok_actions" { + type = list(string) + description = "A list of ARNs (i.e. SNS Topic ARN) to notify on Memory Utilization Low OK action" + default = [] +} + +variable "task_security_group_component" { + type = string + description = "A component that outputs security_group_id for adding to the service as a whole." + default = null +} + +variable "task_iam_role_component" { + type = string + description = "A component that outputs an iam_role module as 'role' for adding to the service as a whole." + default = null +} + +variable "task_exec_policy_arns_map" { + type = map(string) + description = <<-EOT + A map of name to IAM Policy ARNs to attach to the generated task execution role. + The names are arbitrary, but must be known at plan time. The purpose of the name + is so that changes to one ARN do not cause a ripple effect on the other ARNs. + If you cannot provide unique names known at plan time, use `task_exec_policy_arns` instead. 
+ EOT + default = {} +} + + +variable "exec_enabled" { + type = bool + description = "Specifies whether to enable Amazon ECS Exec for the tasks within the service" + default = false +} + +variable "service_connect_configurations" { + type = list(object({ + enabled = bool + namespace = optional(string, null) + log_configuration = optional(object({ + log_driver = string + options = optional(map(string), null) + secret_option = optional(list(object({ + name = string + value_from = string + })), []) + }), null) + service = optional(list(object({ + client_alias = list(object({ + dns_name = string + port = number + })) + discovery_name = optional(string, null) + ingress_port_override = optional(number, null) + port_name = string + })), []) + })) + description = <<-EOT + The list of Service Connect configurations. + See `service_connect_configuration` docs https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecs_service#service_connect_configuration + EOT + default = [] +} + +variable "service_registries" { + type = list(object({ + namespace = string + registry_arn = optional(string) + port = optional(number) + container_name = optional(string) + container_port = optional(number) + })) + description = <<-EOT + The list of Service Registries. + See `service_registries` docs https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecs_service#service_registries + EOT + default = [] +} + +variable "custom_security_group_rules" { + type = list(object({ + type = string + from_port = number + to_port = number + protocol = string + cidr_blocks = list(string) + description = optional(string) + })) + description = "The list of custom security group rules to add to the service security group" + default = [] } diff --git a/modules/ecs-service/versions.tf b/modules/ecs-service/versions.tf index e89eb16ed..c06bc308b 100644 --- a/modules/ecs-service/versions.tf +++ b/modules/ecs-service/versions.tf @@ -3,8 +3,24 @@ terraform { required_providers { aws = { - source = "hashicorp/aws" - version = "~> 4.0" + source = "hashicorp/aws" + ## 4.66.0 version causes an error + ## │ Error: listing tags for Application Auto Scaling Target (): InvalidParameter: 1 validation error(s) found. + ## │ - minimum field size of 1, ListTagsForResourceInput.ResourceARN. + ## │ + ## │ + ## │ with module.ecs_cloudwatch_autoscaling[0].aws_appautoscaling_target.default[0], + ## │ on .terraform/modules/ecs_cloudwatch_autoscaling/main.tf line 15, in resource "aws_appautoscaling_target" "default": + ## │ 15: resource "aws_appautoscaling_target" "default" { + version = ">= 4.66.1" + } + template = { + source = "cloudposse/template" + version = ">= 2.2" + } + jq = { + source = "massdriver-cloud/jq" + version = ">=0.2.0" } } } diff --git a/modules/ecs/README.md b/modules/ecs/README.md index 5c5e9b563..751d7bb83 100644 --- a/modules/ecs/README.md +++ b/modules/ecs/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/ecs + - layer/ecs + - provider/aws +--- + # Component: `ecs` This component is responsible for provisioning an ECS Cluster and associated load balancer.
@@ -12,52 +19,82 @@ The following will create - ecs cluster - load balancer with an ACM cert placed on example.com -- r53 record on all *.example.com which will point to the load balancer +- r53 record on all \*.example.com which will point to the load balancer ```yaml components: terraform: ecs: + settings: + spacelift: + workspace_enabled: true vars: name: ecs + enabled: true acm_certificate_domain: example.com route53_record_name: "*" # Create records will be created in each zone zone_names: - example.com + capacity_providers_fargate: true + capacity_providers_fargate_spot: true + capacity_providers_ec2: + default: + instance_type: t3.medium + max_size: 2 + + alb_configuration: + public: + internal_enabled: false + # resolves to *.public-platform..... + route53_record_name: "*.public-platform" + additional_certs: + - "my-vanity-domain.com" + private: + internal_enabled: true + route53_record_name: "*.private-platform" + additional_certs: + - "my-vanity-domain.com" ``` + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | > 4.0 | +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | > 4.0 | +| [aws](#provider\_aws) | >= 4.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [alb](#module\_alb) | cloudposse/alb/aws | 1.4.0 | -| [dns\_delegated](#module\_dns\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [alb](#module\_alb) | cloudposse/alb/aws | 1.11.1 | +| [cluster](#module\_cluster) | cloudposse/ecs-cluster/aws | 0.4.1 | +| [dns\_delegated](#module\_dns\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [target\_group\_label](#module\_target\_group\_label) | cloudposse/label/null | 0.25.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources | Name | Type | |------|------| -| [aws_ecs_cluster.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecs_cluster) | resource | +| [aws_lb_listener_certificate.additional_certs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_listener_certificate) | resource | | [aws_route53_record.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_record) | resource | +| [aws_security_group.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_security_group_rule.egress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_security_group_rule.ingress_cidr](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_security_group_rule.ingress_security_groups](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_acm_certificate.additional_certs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/acm_certificate) | data source | | 
[aws_acm_certificate.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/acm_certificate) | data source | ## Inputs @@ -70,7 +107,12 @@ components: | [alb\_configuration](#input\_alb\_configuration) | Map of multiple ALB configurations. | `map(any)` | `{}` | no | | [alb\_ingress\_cidr\_blocks\_http](#input\_alb\_ingress\_cidr\_blocks\_http) | List of CIDR blocks allowed to access environment over HTTP | `list(string)` |
[
"0.0.0.0/0"
]
| no | | [alb\_ingress\_cidr\_blocks\_https](#input\_alb\_ingress\_cidr\_blocks\_https) | List of CIDR blocks allowed to access environment over HTTPS | `list(string)` |
[
"0.0.0.0/0"
]
| no | +| [allowed\_cidr\_blocks](#input\_allowed\_cidr\_blocks) | List of CIDR blocks to be allowed to connect to the ECS cluster | `list(string)` | `[]` | no | +| [allowed\_security\_groups](#input\_allowed\_security\_groups) | List of Security Group IDs to be allowed to connect to the ECS cluster | `list(string)` | `[]` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [capacity\_providers\_ec2](#input\_capacity\_providers\_ec2) | EC2 autoscale groups capacity providers |
map(object({
instance_type = string
max_size = number
security_group_ids = optional(list(string), [])
min_size = optional(number, 0)
image_id = optional(string)
instance_initiated_shutdown_behavior = optional(string, "terminate")
key_name = optional(string, "")
user_data = optional(string, "")
enable_monitoring = optional(bool, true)
instance_warmup_period = optional(number, 300)
maximum_scaling_step_size = optional(number, 1)
minimum_scaling_step_size = optional(number, 1)
target_capacity_utilization = optional(number, 100)
ebs_optimized = optional(bool, false)
block_device_mappings = optional(list(object({
device_name = string
no_device = bool
virtual_name = string
ebs = object({
delete_on_termination = bool
encrypted = bool
iops = number
kms_key_id = string
snapshot_id = string
volume_size = number
volume_type = string
})
})), [])
instance_market_options = optional(object({
market_type = string
spot_options = object({
block_duration_minutes = number
instance_interruption_behavior = string
max_price = number
spot_instance_type = string
valid_until = string
})
}))
instance_refresh = optional(object({
strategy = string
preferences = optional(object({
instance_warmup = optional(number, null)
min_healthy_percentage = optional(number, null)
skip_matching = optional(bool, null)
auto_rollback = optional(bool, null)
}), null)
triggers = optional(list(string), [])
}))
mixed_instances_policy = optional(object({
instances_distribution = object({
on_demand_allocation_strategy = string
on_demand_base_capacity = number
on_demand_percentage_above_base_capacity = number
spot_allocation_strategy = string
spot_instance_pools = number
spot_max_price = string
})
}), {
instances_distribution = null
})
placement = optional(object({
affinity = string
availability_zone = string
group_name = string
host_id = string
tenancy = string
}))
credit_specification = optional(object({
cpu_credits = string
}))
elastic_gpu_specifications = optional(object({
type = string
}))
disable_api_termination = optional(bool, false)
default_cooldown = optional(number, 300)
health_check_grace_period = optional(number, 300)
force_delete = optional(bool, false)
termination_policies = optional(list(string), ["Default"])
suspended_processes = optional(list(string), [])
placement_group = optional(string, "")
metrics_granularity = optional(string, "1Minute")
enabled_metrics = optional(list(string), [
"GroupMinSize",
"GroupMaxSize",
"GroupDesiredCapacity",
"GroupInServiceInstances",
"GroupPendingInstances",
"GroupStandbyInstances",
"GroupTerminatingInstances",
"GroupTotalInstances",
"GroupInServiceCapacity",
"GroupPendingCapacity",
"GroupStandbyCapacity",
"GroupTerminatingCapacity",
"GroupTotalCapacity",
"WarmPoolDesiredCapacity",
"WarmPoolWarmedCapacity",
"WarmPoolPendingCapacity",
"WarmPoolTerminatingCapacity",
"WarmPoolTotalCapacity",
"GroupAndWarmPoolDesiredCapacity",
"GroupAndWarmPoolTotalCapacity",
])
wait_for_capacity_timeout = optional(string, "10m")
service_linked_role_arn = optional(string, "")
metadata_http_endpoint_enabled = optional(bool, true)
metadata_http_put_response_hop_limit = optional(number, 2)
metadata_http_tokens_required = optional(bool, true)
metadata_http_protocol_ipv6_enabled = optional(bool, false)
tag_specifications_resource_types = optional(set(string), ["instance", "volume"])
max_instance_lifetime = optional(number, null)
capacity_rebalance = optional(bool, false)
warm_pool = optional(object({
pool_state = string
min_size = number
max_group_prepared_capacity = number
}))
}))
| `{}` | no | +| [capacity\_providers\_fargate](#input\_capacity\_providers\_fargate) | Use FARGATE capacity provider | `bool` | `true` | no | +| [capacity\_providers\_fargate\_spot](#input\_capacity\_providers\_fargate\_spot) | Use FARGATE\_SPOT capacity provider | `bool` | `false` | no | | [container\_insights\_enabled](#input\_container\_insights\_enabled) | Whether or not to enable container insights | `bool` | `true` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | @@ -81,8 +123,6 @@ components: | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [internal\_enabled](#input\_internal\_enabled) | Whether to create an internal load balancer for services in this cluster | `bool` | `false` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | @@ -112,8 +152,11 @@ components: | [security\_group\_id](#output\_security\_group\_id) | Security group id | | [vpc\_id](#output\_vpc\_id) | VPC ID | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/ecs) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/ecs) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/ecs/default.auto.tfvars b/modules/ecs/default.auto.tfvars deleted file mode 100644 index 47f94fb9b..000000000 --- a/modules/ecs/default.auto.tfvars +++ /dev/null @@ -1 +0,0 @@ -enabled = false diff --git a/modules/ecs/main.tf b/modules/ecs/main.tf index d33f5549e..909f7a831 100644 --- a/modules/ecs/main.tf +++ b/modules/ecs/main.tf @@ -3,7 +3,10 @@ locals { dns_enabled = local.enabled && var.route53_enabled - acm_certificate_domain = length(var.acm_certificate_domain_suffix) > 0 ? format("%s.%s.%s", var.acm_certificate_domain_suffix, var.environment, module.dns_delegated.outputs.default_domain_name) : coalesce(var.acm_certificate_domain, module.dns_delegated.outputs.default_domain_name) + # If var.acm_certificate_domain is defined, use it. + # Else if var.acm_certificate_domain_suffix is defined, use {{ var.acm_certificate_domain_suffix }}.{{ environment }}.{{ domain }} + # Else, use {{ environment }}.{{ domain }} + acm_certificate_domain = try(length(var.acm_certificate_domain) > 0, false) ? var.acm_certificate_domain : try(length(var.acm_certificate_domain_suffix) > 0, false) ? format("%s.%s.%s", var.acm_certificate_domain_suffix, var.environment, module.dns_delegated.outputs.default_domain_name) : format("%s.%s", var.environment, module.dns_delegated.outputs.default_domain_name) maintenance_page_fixed_response = { content_type = "text/html" @@ -27,47 +30,118 @@ module "target_group_label" { context = module.this.context } -resource "aws_ecs_cluster" "default" { - count = local.enabled ? 1 : 0 - - name = module.this.id - - # TODO: configuration.execute_command_configuration - # execute_command_configuration { - # kms_key_id = - # logging = "OVERRIDE" # "DEFAULT" - # # log_configuration is required when logging is set to "OVERRIDE" - # log_configuration { - # cloud_watch_encryption_enabled = var.cloud_watch_encryption_enabled - # cloud_watch_log_group_name = module.cloudwatch_log_group.name - # s3_bucket_name = module.logging_bucket.name - # s3_bucket_encryption_enabled = true - # s3_key_prefix = "/" - # } - # } - - setting { - name = "containerInsights" - value = var.container_insights_enabled ? "enabled" : "disabled" +resource "aws_security_group" "default" { + count = local.enabled ? 1 : 0 + name = module.this.id + description = "ECS cluster EC2 autoscale capacity providers" + vpc_id = module.vpc.outputs.vpc_id +} + +resource "aws_security_group_rule" "ingress_cidr" { + for_each = local.enabled ? toset(var.allowed_cidr_blocks) : [] + type = "ingress" + from_port = 0 + to_port = 65535 + protocol = "tcp" + cidr_blocks = [each.value] + security_group_id = join("", aws_security_group.default[*].id) +} + +resource "aws_security_group_rule" "ingress_security_groups" { + for_each = local.enabled ? 
toset(var.allowed_security_groups) : [] + type = "ingress" + from_port = 0 + to_port = 65535 + protocol = "tcp" + source_security_group_id = each.value + security_group_id = join("", aws_security_group.default[*].id) +} + +resource "aws_security_group_rule" "egress" { + count = local.enabled ? 1 : 0 + type = "egress" + from_port = 0 + to_port = 65535 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + security_group_id = join("", aws_security_group.default[*].id) +} + +module "cluster" { + source = "cloudposse/ecs-cluster/aws" + version = "0.4.1" + + context = module.this.context + + container_insights_enabled = var.container_insights_enabled + capacity_providers_fargate = var.capacity_providers_fargate + capacity_providers_fargate_spot = var.capacity_providers_fargate_spot + capacity_providers_ec2 = { + for name, provider in var.capacity_providers_ec2 : + name => merge( + provider, + { + security_group_ids = concat(aws_security_group.default[*].id, provider.security_group_ids) + subnet_ids = var.internal_enabled ? module.vpc.outputs.private_subnet_ids : module.vpc.outputs.public_subnet_ids + associate_public_ip_address = !var.internal_enabled + } + ) } - tags = module.this.tags + # external_ec2_capacity_providers = { + # external_default = { + # autoscaling_group_arn = module.autoscale_group.autoscaling_group_arn + # managed_termination_protection = false + # managed_scaling_status = false + # instance_warmup_period = 300 + # maximum_scaling_step_size = 1 + # minimum_scaling_step_size = 1 + # target_capacity_utilization = 100 + # } + # } + } -# TODO: setup capacity providers -# resource "aws_ecs_cluster_capacity_providers" "default" { -# count = local.enabled ? 1 : 0 +#locals { +# user_data = <> /etc/ecs/ecs.config +#echo ECS_ENABLE_CONTAINER_METADATA=true >> /etc/ecs/ecs.config +#echo ECS_POLL_METRICS=true >> /etc/ecs/ecs.config +#EOT # -# cluster_name = join("", aws_ecs_cluster.default[*].name) +#} # -# capacity_providers = ["FARGATE"] +#data "aws_ssm_parameter" "ami" { +# name = "/aws/service/ecs/optimized-ami/amazon-linux-2/recommended/image_id" +#} # -# default_capacity_provider_strategy { -# base = 1 -# weight = 100 -# capacity_provider = "FARGATE" -# } -# } +#module "autoscale_group" { +# source = "cloudposse/ec2-autoscale-group/aws" +# version = "0.31.1" +# +# context = module.this.context +# +# image_id = data.aws_ssm_parameter.ami.value +# instance_type = "t3.medium" +# security_group_ids = aws_security_group.default[*].id +# subnet_ids = var.internal_enabled ? module.vpc.outputs.private_subnet_ids : module.vpc.outputs.public_subnet_ids +# health_check_type = "EC2" +# desired_capacity = 1 +# min_size = 1 +# max_size = 2 +# wait_for_capacity_timeout = "5m" +# associate_public_ip_address = true +# user_data_base64 = base64encode(local.user_data) +# +# # Auto-scaling policies and CloudWatch metric alarms +# autoscaling_policies_enabled = true +# cpu_utilization_high_threshold_percent = "70" +# cpu_utilization_low_threshold_percent = "20" +# +# iam_instance_profile_name = module.cluster.role_name +#} + resource "aws_route53_record" "default" { for_each = local.dns_enabled ? var.alb_configuration : {} @@ -91,12 +165,12 @@ data "aws_acm_certificate" "default" { module "alb" { source = "cloudposse/alb/aws" - version = "1.4.0" + version = "1.11.1" for_each = local.enabled ? var.alb_configuration : {} vpc_id = module.vpc.outputs.vpc_id - subnet_ids = var.internal_enabled ? 
module.vpc.outputs.private_subnet_ids : module.vpc.outputs.public_subnet_ids + subnet_ids = lookup(each.value, "internal_enabled", var.internal_enabled) ? module.vpc.outputs.private_subnet_ids : module.vpc.outputs.public_subnet_ids ip_address_type = lookup(each.value, "ip_address_type", "ipv4") internal = lookup(each.value, "internal_enabled", var.internal_enabled) @@ -114,9 +188,8 @@ module "alb" { https_ingress_cidr_blocks = lookup(each.value, "https_ingress_cidr_blocks", var.alb_ingress_cidr_blocks_https) certificate_arn = lookup(each.value, "certificate_arn", one(data.aws_acm_certificate.default[*].arn)) - access_logs_enabled = lookup(each.value, "access_logs_enabled", true) - alb_access_logs_s3_bucket_force_destroy = lookup(each.value, "alb_access_logs_s3_bucket_force_destroy", true) - alb_access_logs_s3_bucket_force_destroy_enabled = lookup(each.value, "alb_access_logs_s3_bucket_force_destroy_enabled", true) + access_logs_enabled = lookup(each.value, "access_logs_enabled", true) + alb_access_logs_s3_bucket_force_destroy = lookup(each.value, "alb_access_logs_s3_bucket_force_destroy", true) lifecycle_rule_enabled = lookup(each.value, "lifecycle_rule_enabled", true) @@ -154,3 +227,25 @@ module "alb" { context = module.this.context } + +locals { + # formats the load-balancer configuration data to be: + # { "${alb_configuration key}_${additional_cert_entry}" => "additional_cert_entry" } + certificate_domains = merge([ + for config_key, config in var.alb_configuration : + { for domain in config.additional_certs : + "${config_key}_${domain}" => domain } if length(lookup(config, "additional_certs", [])) > 0 + ]...) +} + +resource "aws_lb_listener_certificate" "additional_certs" { + for_each = local.certificate_domains + + listener_arn = module.alb[split("_", each.key)[0]].https_listener_arn + certificate_arn = data.aws_acm_certificate.additional_certs[each.key].arn +} +data "aws_acm_certificate" "additional_certs" { + for_each = local.certificate_domains + + domain = each.value +} diff --git a/modules/ecs/outputs.tf b/modules/ecs/outputs.tf index 87a80f618..4d730801f 100644 --- a/modules/ecs/outputs.tf +++ b/modules/ecs/outputs.tf @@ -1,10 +1,10 @@ output "cluster_arn" { - value = join("", aws_ecs_cluster.default[*].arn) + value = module.cluster.arn description = "ECS cluster ARN" } output "cluster_name" { - value = join("", aws_ecs_cluster.default[*].name) + value = module.cluster.name description = "ECS Cluster Name" } diff --git a/modules/ecs/providers.tf b/modules/ecs/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/ecs/providers.tf +++ b/modules/ecs/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/ecs/remote-state.tf b/modules/ecs/remote-state.tf index 0112e39cd..db002a31c 100644 --- a/modules/ecs/remote-state.tf +++ b/modules/ecs/remote-state.tf @@ -1,6 +1,6 @@ module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = "vpc" @@ -9,7 +9,7 @@ module "vpc" { module "dns_delegated" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = var.dns_delegated_component_name stage = var.dns_delegated_stage_name diff --git a/modules/ecs/variables.tf b/modules/ecs/variables.tf index a90e79ae5..a1727ab60 100644 --- a/modules/ecs/variables.tf +++ b/modules/ecs/variables.tf @@ -80,3 +80,155 @@ variable "dns_delegated_component_name" { default = "dns-delegated" description = "Use this component name to read from the remote state to get the dns_delegated zone ID" } + +variable "allowed_security_groups" { + type = list(string) + default = [] + description = "List of Security Group IDs to be allowed to connect to the ECS cluster" +} + +variable "allowed_cidr_blocks" { + type = list(string) + default = [] + description = "List of CIDR blocks to be allowed to connect to the ECS cluster" +} + +variable "capacity_providers_fargate" { + description = "Use FARGATE capacity provider" + type = bool + default = true +} + +variable "capacity_providers_fargate_spot" { + description = "Use FARGATE_SPOT capacity provider" + type = bool + default = false +} + +variable "capacity_providers_ec2" { + description = "EC2 autoscale groups capacity providers" + type = map(object({ + instance_type = string + max_size = number + security_group_ids = optional(list(string), []) + min_size = optional(number, 0) + image_id = optional(string) + instance_initiated_shutdown_behavior = optional(string, "terminate") + key_name = optional(string, "") + user_data = optional(string, "") + enable_monitoring = optional(bool, true) + instance_warmup_period = optional(number, 300) + maximum_scaling_step_size = optional(number, 1) + minimum_scaling_step_size = optional(number, 1) + target_capacity_utilization = optional(number, 100) + ebs_optimized = optional(bool, false) + block_device_mappings = optional(list(object({ + device_name = string + no_device = bool + virtual_name = string + ebs = object({ + delete_on_termination = bool + encrypted = bool + iops = number + kms_key_id = string + snapshot_id = string + volume_size = number + volume_type = string + }) + })), []) + instance_market_options = optional(object({ + market_type = string + spot_options = object({ + block_duration_minutes = number + instance_interruption_behavior = string + max_price = number + spot_instance_type = string + valid_until = string + }) + })) + instance_refresh = optional(object({ + strategy = string + preferences = optional(object({ + instance_warmup = optional(number, null) + min_healthy_percentage = optional(number, null) + skip_matching = 
optional(bool, null) + auto_rollback = optional(bool, null) + }), null) + triggers = optional(list(string), []) + })) + mixed_instances_policy = optional(object({ + instances_distribution = object({ + on_demand_allocation_strategy = string + on_demand_base_capacity = number + on_demand_percentage_above_base_capacity = number + spot_allocation_strategy = string + spot_instance_pools = number + spot_max_price = string + }) + }), { + instances_distribution = null + }) + placement = optional(object({ + affinity = string + availability_zone = string + group_name = string + host_id = string + tenancy = string + })) + credit_specification = optional(object({ + cpu_credits = string + })) + elastic_gpu_specifications = optional(object({ + type = string + })) + disable_api_termination = optional(bool, false) + default_cooldown = optional(number, 300) + health_check_grace_period = optional(number, 300) + force_delete = optional(bool, false) + termination_policies = optional(list(string), ["Default"]) + suspended_processes = optional(list(string), []) + placement_group = optional(string, "") + metrics_granularity = optional(string, "1Minute") + enabled_metrics = optional(list(string), [ + "GroupMinSize", + "GroupMaxSize", + "GroupDesiredCapacity", + "GroupInServiceInstances", + "GroupPendingInstances", + "GroupStandbyInstances", + "GroupTerminatingInstances", + "GroupTotalInstances", + "GroupInServiceCapacity", + "GroupPendingCapacity", + "GroupStandbyCapacity", + "GroupTerminatingCapacity", + "GroupTotalCapacity", + "WarmPoolDesiredCapacity", + "WarmPoolWarmedCapacity", + "WarmPoolPendingCapacity", + "WarmPoolTerminatingCapacity", + "WarmPoolTotalCapacity", + "GroupAndWarmPoolDesiredCapacity", + "GroupAndWarmPoolTotalCapacity", + ]) + wait_for_capacity_timeout = optional(string, "10m") + service_linked_role_arn = optional(string, "") + metadata_http_endpoint_enabled = optional(bool, true) + metadata_http_put_response_hop_limit = optional(number, 2) + metadata_http_tokens_required = optional(bool, true) + metadata_http_protocol_ipv6_enabled = optional(bool, false) + tag_specifications_resource_types = optional(set(string), ["instance", "volume"]) + max_instance_lifetime = optional(number, null) + capacity_rebalance = optional(bool, false) + warm_pool = optional(object({ + pool_state = string + min_size = number + max_group_prepared_capacity = number + })) + })) + default = {} + validation { + condition = !contains(["FARGATE", "FARGATE_SPOT"], keys(var.capacity_providers_ec2)) + error_message = "'FARGATE' and 'FARGATE_SPOT' name is reserved" + } +} diff --git a/modules/ecs/versions.tf b/modules/ecs/versions.tf index 288178d45..4c8603db1 100644 --- a/modules/ecs/versions.tf +++ b/modules/ecs/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 1.0.0" + required_version = ">= 1.3.0" required_providers { aws = { source = "hashicorp/aws" - version = "> 4.0" + version = ">= 4.0" } } } diff --git a/modules/eks/efs/README.md b/modules/efs/README.md similarity index 86% rename from modules/eks/efs/README.md rename to modules/efs/README.md index 0cd19fa8a..72d289bb6 100644 --- a/modules/eks/efs/README.md +++ b/modules/efs/README.md @@ -1,6 +1,16 @@ +--- +tags: + - component/efs + - layer/data + - layer/eks + - provider/aws +--- + # Component: `efs` -This component is responsible for provisioning an [EFS](https://aws.amazon.com/efs/) Network File System with KMS encryption-at-rest. EFS is an excellent choice as the default block storage for EKS clusters so that volumes are not zone-locked. 
+This component is responsible for provisioning an [EFS](https://aws.amazon.com/efs/) Network File System with KMS +encryption-at-rest. EFS is an excellent choice as the default block storage for EKS clusters so that volumes are not +zone-locked. ## Usage @@ -17,33 +27,42 @@ components: name: shared-files dns_name: shared-files provisioned_throughput_in_mibps: 10 + # additional_security_group_rules: + # - key: "fargate_efs" + # type: "ingress" + # from_port: 2049 + # to_port: 2049 + # protocol: "tcp" + # description: "Allow Fargate EFS Volume mounts" + # cidr_blocks: ["0.0.0.0/0"] ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | +| [aws](#provider\_aws) | >= 4.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [efs](#module\_efs) | cloudposse/efs/aws | 0.32.7 | -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | -| [gbl\_dns\_delegated](#module\_gbl\_dns\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | -| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [efs](#module\_efs) | cloudposse/efs/aws | 0.35.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [gbl\_dns\_delegated](#module\_gbl\_dns\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [kms\_key\_efs](#module\_kms\_key\_efs) | cloudposse/kms-key/aws | 0.12.1 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -57,6 +76,7 @@ components: | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| +| [additional\_security\_group\_rules](#input\_additional\_security\_group\_rules) | A list of Security Group rule objects to add to the created security group, in addition to the ones
this module normally creates. (To suppress the module's rules, set `create_security_group` to false
and supply your own security group via `associated_security_group_ids`.)
The keys and values of the objects are fully compatible with the `aws_security_group_rule` resource, except
for `security_group_id` which will be ignored, and the optional "key" which, if provided, must be unique and known at "plan" time.
To get more info see https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule . | `list(any)` | `[]` | no | | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | @@ -69,8 +89,6 @@ components: | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [hostname\_template](#input\_hostname\_template) | The `format()` string to use to generate the hostname via `format(var.hostname_template, var.tenant, var.stage, var.environment)`"
Typically something like `"echo.%[3]v.%[2]v.example.com"`. | `string` | n/a | yes | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -102,10 +120,11 @@ components: | [security\_group\_id](#output\_security\_group\_id) | EFS Security Group ID | | [security\_group\_name](#output\_security\_group\_name) | EFS Security Group name | - + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/efs) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/efs) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/efs/context.tf b/modules/efs/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/efs/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. 
+ Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. 
+ This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/eks/efs/main.tf b/modules/efs/main.tf similarity index 97% rename from modules/eks/efs/main.tf rename to modules/efs/main.tf index d5f5a2c9b..948f2b68f 100644 --- a/modules/eks/efs/main.tf +++ b/modules/efs/main.tf @@ -17,12 +17,13 @@ locals { module "efs" { source = "cloudposse/efs/aws" - version = "0.32.7" + version = "0.35.0" region = var.region vpc_id = local.vpc_id subnets = local.private_subnet_ids allowed_security_group_ids = local.allowed_security_groups + additional_security_group_rules = var.additional_security_group_rules performance_mode = var.performance_mode provisioned_throughput_in_mibps = var.provisioned_throughput_in_mibps throughput_mode = var.throughput_mode diff --git a/modules/eks/efs/outputs.tf b/modules/efs/outputs.tf similarity index 100% rename from modules/eks/efs/outputs.tf rename to modules/efs/outputs.tf diff --git a/modules/efs/providers.tf b/modules/efs/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/efs/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/eks/efs/remote-state.tf b/modules/efs/remote-state.tf similarity index 89% rename from modules/eks/efs/remote-state.tf rename to modules/efs/remote-state.tf index 4827a69f8..57f2055b5 100644 --- a/modules/eks/efs/remote-state.tf +++ b/modules/efs/remote-state.tf @@ -1,6 +1,6 @@ module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = "vpc" @@ -9,7 +9,7 @@ module "vpc" { module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" for_each = local.eks_security_group_enabled ? var.eks_component_names : toset([]) @@ -20,7 +20,7 @@ module "eks" { module "gbl_dns_delegated" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = "dns-delegated" environment = "gbl" diff --git a/modules/eks/efs/variables.tf b/modules/efs/variables.tf similarity index 65% rename from modules/eks/efs/variables.tf rename to modules/efs/variables.tf index 130b0bf58..c2fd666fd 100644 --- a/modules/eks/efs/variables.tf +++ b/modules/efs/variables.tf @@ -46,3 +46,16 @@ variable "eks_component_names" { description = "The names of the eks components" default = ["eks/cluster"] } + +variable "additional_security_group_rules" { + type = list(any) + default = [] + description = <<-EOT + A list of Security Group rule objects to add to the created security group, in addition to the ones + this module normally creates. (To suppress the module's rules, set `create_security_group` to false + and supply your own security group via `associated_security_group_ids`.) 
+ The keys and values of the objects are fully compatible with the `aws_security_group_rule` resource, except + for `security_group_id` which will be ignored, and the optional "key" which, if provided, must be unique and known at "plan" time. + To get more info see https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule . + EOT +} diff --git a/modules/efs/versions.tf b/modules/efs/versions.tf new file mode 100644 index 000000000..f33ede77f --- /dev/null +++ b/modules/efs/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + } +} diff --git a/modules/eks-iam/providers.tf b/modules/eks-iam/providers.tf deleted file mode 100755 index 506e16d2e..000000000 --- a/modules/eks-iam/providers.tf +++ /dev/null @@ -1,21 +0,0 @@ -provider "aws" { - region = var.region - - assume_role { - # `terraform import` will not use data from a data source, - # so on import we have to explicitly specify the role - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) - } -} - -module "iam_roles" { - source = "../account-map/modules/iam-roles" - stage = var.stage - region = var.region -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/eks/actions-runner-controller/CHANGELOG.md b/modules/eks/actions-runner-controller/CHANGELOG.md new file mode 100644 index 000000000..5fa8bdc77 --- /dev/null +++ b/modules/eks/actions-runner-controller/CHANGELOG.md @@ -0,0 +1,137 @@ +## Release 1.470.1 + +Components PR [#1077](https://github.com/cloudposse/terraform-aws-components/pull/1077) + +Bugfix: + +- Fix templating of document separators in Helm chart template. Affects users who are not using + `running_pod_annotations`. + +## Release 1.470.0 + +Components PR [#1075](https://github.com/cloudposse/terraform-aws-components/pull/1075) + +New Features: + +- Add support for + [scheduled overrides](https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#scheduled-overrides) + of Runner Autoscaler min and max replicas. +- Add option `tmpfs_enabled` to have runners use RAM-backed ephemeral storage (`tmpfs`, `emptyDir.medium: Memory`) + instead of disk-backed storage. +- Add `wait_for_docker_seconds` to allow configuration of the time to wait for the Docker daemon to be ready before + starting the runner. +- Add the ability to have the runner Pods add annotations to themselves once they start running a job. (Actually + released in release 1.454.0, but not documented until now.) + +Changes: + +- Previously, `syncPeriod`, which sets the period in which the controller reconciles the desired runners count, was set + to 120 seconds in `resources/values.yaml`. This setting has been removed, reverting to the default value of 1 minute. + You can still set this value by setting the `syncPeriod` value in the `values.yaml` file or by setting `syncPeriod` in + `var.chart_values`. +- Previously, `RUNNER_GRACEFUL_STOP_TIMEOUT` was hardcoded to 90 seconds. That has been reduced to 80 seconds to expand + the buffer between that and forceful termination from 10 seconds to 20 seconds, increasing the chances the runner will + successfully deregister itself. +- The inaccurately named `webhook_startup_timeout` has been replaced with `max_duration`. 
`webhook_startup_timeout` is
+  still supported for backward compatibility, but is deprecated.
+
+Bugfixes:
+
+- Create and deploy the webhook secret when an existing secret is not supplied
+- Restore proper order of operations in creating resources (broken in release 1.454.0 (PR #1055))
+- If `docker_storage` is set and `dockerdWithinRunnerContainer` is `true` (which is hardcoded to be the case), properly
+  mount the docker storage volume into the runner container rather than the (non-existent) docker sidecar container.
+
+### Discussion
+
+#### Scheduled overrides
+
+Scheduled overrides allow you to set different min and max replica values for the runner autoscaler at different times.
+This can be useful if you have predictable patterns of load on your runners. For example, you might want to scale down
+to zero at night and scale up during the day. This feature is implemented by adding a `scheduled_overrides` field to the
+`var.runners` map.
+
+See the
+[Actions Runner Controller documentation](https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#scheduled-overrides)
+for details on how they work and how to set them up.
+
+#### Use RAM instead of Disk via `tmpfs_enabled`
+
+The standard `gp3` EBS volume used for EC2 instance's disk storage is limited (unless you pay extra) to 3000 IOPS and
+125 MB/s throughput. This is fine for average workloads, but it does not scale with instance size. A `.48xlarge`
+instance could host 90 Pods, but all 90 would still be sharing the same single 3000 IOPS and 125 MB/s throughput EBS
+volume attached to the host. This can lead to severe performance issues, as the whole Node gets locked up waiting for
+disk I/O.
+
+To mitigate this issue, we have added the `tmpfs_enabled` option to the `runners` map. When set to `true`, the runner
+Pods will use RAM-backed ephemeral storage (`tmpfs`, `emptyDir.medium: Memory`) instead of disk-backed storage. This
+means the Pod's impact on the Node's disk I/O is limited to the overhead required to launch and manage the Pod (e.g.
+downloading the container image and writing logs to the disk). This can be a significant performance improvement,
+allowing you to run more Pods on a single Node without running into disk I/O bottlenecks. Without this feature enabled,
+you may be limited to running something like 14 Runners on an instance, regardless of instance size, due to disk I/O
+limits. With this feature enabled, you may be able to run 50-100 Runners on a single instance.
+
+The trade-off is that the Pod's data is stored in RAM, which increases its memory usage. Be sure to increase the amount
+of memory allocated to the runner Pod to account for this. This is generally not a problem, as Runners typically use a
+small enough amount of disk space that it can be reasonably stored in the RAM allocated to a single CPU in an EC2
+instance, so it is the CPU that remains the limiting factor in how many Runners can be run on an instance.
+
+> [!WARNING]
+>
+> #### You must configure a memory request for the runner Pod
+>
+> When using `tmpfs_enabled`, you must configure a memory request for the runner Pod. If you do not, a single Pod would
+> be allowed to consume half the Node's memory just for its disk storage.
+
+#### Configure startup timeout via `wait_for_docker_seconds`
+
+When the runner starts and Docker-in-Docker is enabled, the runner waits for the Docker daemon to be ready before
+marking itself ready to run jobs.
This is done by polling the Docker daemon every second until it is ready. +The default timeout for this is 120 seconds. If the Docker daemon is not ready within that time, the runner will exit +with an error. You can configure this timeout by setting `wait_for_docker_seconds` in the `runners` map. + +As a general rule, the Docker daemon should be ready within a few seconds of the runner starting. However, particularly +when there are disk I/O issues (see the `tmpfs_enabled` feature above), the Docker daemon may take longer to respond. + +#### Add annotations to runner Pods once they start running a job + +You can now configure the runner Pods to add annotations to themselves once they start running a job. The idea is to +allow you to have idle pods allow themselves to be interrupted, but then mark themselves as uninterruptible once they +start running a job. This is done by setting the `running_pod_annotations` field in the `runners` map. For example: + +```yaml +running_pod_annotations: + # Prevent Karpenter from evicting or disrupting the worker pods while they are running jobs + # As of 0.37.0, is not 100% effective due to race conditions. + "karpenter.sh/do-not-disrupt": "true" +``` + +As noted in the comments above, this was intended to prevent Karpenter from evicting or disrupting the worker pods while +they are running jobs, while leaving Karpenter free to interrupt idle Runners. However, as of Karpenter 0.37.0, this is +not 100% effective due to race conditions: Karpenter may decide to terminate the Node the Pod is running on but not +signal the Pod before it accepts a job and starts running it. Without the availability of transactions or atomic +operations, this is a difficult problem to solve, and will probably require a more complex solution than just adding +annotations to the Pods. Nevertheless, this feature remains available for use in other contexts, as well as in the hope +that it will eventually work with Karpenter. + +#### Bugfix: Deploy webhook secret when existing secret is not supplied + +Because deploying secrets with Terraform causes the secrets to be stored unencrypted in the Terraform state file, we +give users the option of creating the configuration secret externally (e.g. via +[SOPS](https://github.com/getsops/sops)). Unfortunately, at some distant time in the past, when we enabled this option, +we broke this component insofar as the webhook secret was no longer being deployed when the user did not supply an +existing secret. This PR fixes that. + +The consequence of this bug was that, since the webhook secret was not being deployed, the webhook did not reject +unauthorized requests. This could have allowed an attacker to trigger the webhook and perform a DOS attack by killing +jobs as soon as they were accepted from the queue. A more practical and unintentional consequence was if a repo webhook +was installed alongside an org webhook, it would not keep guard against the webhook receiving the same payload twice if +one of the webhooks was missing the secret or had the wrong secret. + +#### Bugfix: Restore proper order of operations in creating resources + +In release 1.454.0 (PR [#1055](https://github.com/cloudposse/terraform-aws-components/pull/1055)), we reorganized the +RunnerDeployment template in the Helm chart to put the RunnerDeployment resource first, since it is the most important +resource, merely to improve readability. 
Unfortunately, the order of operations in creating resources is important, and +this change broke the deployment by deploying the RunnerDeployment before creating the resources it depends on. This PR +restores the proper order of operations. diff --git a/modules/eks/actions-runner-controller/README.md b/modules/eks/actions-runner-controller/README.md index 620a883df..31a64319f 100644 --- a/modules/eks/actions-runner-controller/README.md +++ b/modules/eks/actions-runner-controller/README.md @@ -1,6 +1,15 @@ -# Component: `actions-runner-controller` +--- +tags: + - component/eks/actions-runner-controller + - layer/github + - provider/aws + - provider/helm +--- -This component creates a Helm release for [actions-runner-controller](https://github.com/actions-runner-controller/actions-runner-controller) on an EKS cluster. +# Component: `eks/actions-runner-controller` + +This component creates a Helm release for +[actions-runner-controller](https://github.com/actions-runner-controller/actions-runner-controller) on an EKS cluster. ## Usage @@ -20,55 +29,168 @@ The default catalog values `e.g. stacks/catalog/eks/actions-runner-controller.ya components: terraform: eks/actions-runner-controller: - settings: - spacelift: - workspace_enabled: true vars: enabled: true name: "actions-runner" # avoids hitting name length limit on IAM role chart: "actions-runner-controller" chart_repository: "https://actions-runner-controller.github.io/actions-runner-controller" - chart_version: "0.21.0" + chart_version: "0.23.7" kubernetes_namespace: "actions-runner-system" create_namespace: true + kubeconfig_exec_auth_api_version: "client.authentication.k8s.io/v1beta1" + # helm_manifest_experiment_enabled feature causes inconsistent final plans with charts that have CRDs + # see https://github.com/hashicorp/terraform-provider-helm/issues/711#issuecomment-836192991 + helm_manifest_experiment_enabled: false + + ssm_github_secret_path: "/github_runners/controller_github_app_secret" + github_app_id: "REPLACE_ME_GH_APP_ID" + github_app_installation_id: "REPLACE_ME_GH_INSTALLATION_ID" + + # use to enable docker config json secret, which can login to dockerhub for your GHA Runners + docker_config_json_enabled: true + # The content of this param should look like: + # { + # "auths": { + # "https://index.docker.io/v1/": { + # "username": "your_username", + # "password": "your_password + # "email": "your_email", + # "auth": "$(echo "your_username:your_password" | base64)" + # } + # } + # } | base64 + ssm_docker_config_json_path: "/github_runners/docker/config-json" + + # ssm_github_webhook_secret_token_path: "/github_runners/github_webhook_secret_token" + # The webhook based autoscaler is much more efficient than the polling based autoscaler + webhook: + enabled: true + hostname_template: "gha-webhook.%[3]v.%[2]v.%[1]v.acme.com" + + eks_component_name: "eks/cluster" resources: limits: - cpu: 100m - memory: 128Mi + cpu: 500m + memory: 256Mi requests: - cpu: 100m + cpu: 250m memory: 128Mi - ssm_github_token_path: "/github_runners/controller_github_app_secret" - ssm_github_webhook_secret_token_path: "/github_runners/controller_github_app_secret" - github_app_id: "123456" - github_app_installation_id: "234567890" - webhook: - enabled: true - # gha-webhook.use1.auto.core.acme.net - hostname_template: "gha-webhook.%[3]v.%[2]v.%[1]v.acme.net" - timeout: 120 runners: - infrastructure-runner: + infra-runner: + node_selector: + kubernetes.io/os: "linux" + kubernetes.io/arch: "amd64" type: "repository" # can be either 'organization' or 
'repository' - dind_enabled: false # If `true`, a Docker sidecar container will be deployed - # To run Docker in Docker (dind), change image from summerwind/actions-runner to summerwind/actions-runner-dind - image: summerwind/actions-runner - scope: "acme/infrastructure" - scale_down_delay_seconds: 300 - min_replicas: 1 - max_replicas: 5 + dind_enabled: true # If `true`, a Docker daemon will be started in the runner Pod. + # To run Docker in Docker (dind), change image to summerwind/actions-runner-dind + # If not running Docker, change image to summerwind/actions-runner use a smaller image + image: summerwind/actions-runner-dind + # `scope` is org name for Organization runners, repo name for Repository runners + scope: "org/infra" + min_replicas: 0 # Default, overridden by scheduled_overrides below + max_replicas: 20 + # Scheduled overrides. See https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#scheduled-overrides + # Order is important. The earlier entry is prioritized higher than later entries. So you usually define + # one-time overrides at the top of your list, then yearly, monthly, weekly, and lastly daily overrides. + scheduled_overrides: + # Override the daily override on the weekends + - start_time: "2024-07-06T00:00:00-08:00" # Start of Saturday morning Pacific Standard Time + end_time: "2024-07-07T23:59:59-07:00" # End of Sunday night Pacific Daylight Time + min_replicas: 0 + recurrence_rule: + frequency: "Weekly" + # Keep a warm pool of runners during normal working hours + - start_time: "2024-07-01T09:00:00-08:00" # 9am Pacific Standard Time (8am PDT), start of workday + end_time: "2024-07-01T17:00:00-07:00" # 5pm Pacific Daylight Time (6pm PST), end of workday + min_replicas: 2 + recurrence_rule: + frequency: "Daily" + scale_down_delay_seconds: 100 resources: limits: cpu: 200m - memory: 256Mi + memory: 512Mi requests: cpu: 100m memory: 128Mi webhook_driven_scaling_enabled: true + # max_duration is the duration after which a job will be considered completed, + # (and the runner killed) even if the webhook has not received a "job completed" event. + # This is to ensure that if an event is missed, it does not leave the runner running forever. + # Set it long enough to cover the longest job you expect to run and then some. + # See https://github.com/actions/actions-runner-controller/blob/9afd93065fa8b1f87296f0dcdf0c2753a0548cb7/docs/automatically-scaling-runners.md?plain=1#L264-L268 + max_duration: "90m" + # Pull-driven scaling is obsolete and should not be used. pull_driven_scaling_enabled: false + # Labels are not case-sensitive to GitHub, but *are* case-sensitive + # to the webhook based autoscaler, which requires exact matches + # between the `runs-on:` label in the workflow and the runner labels. labels: + - "Linux" + - "linux" - "Ubuntu" - - "self-hosted" + - "ubuntu" + - "X64" + - "x64" + - "x86_64" + - "amd64" + - "AMD64" + - "core-auto" + - "common" + # Uncomment this additional runner if you want to run a second + # runner pool for `arm64` architecture + #infra-runner-arm64: + # node_selector: + # kubernetes.io/os: "linux" + # kubernetes.io/arch: "arm64" + # # Add the corresponding taint to the Kubernetes nodes running `arm64` architecture + # # to prevent Kubernetes pods without node selectors from being scheduled on them. 
+ # tolerations: + # - key: "kubernetes.io/arch" + # operator: "Equal" + # value: "arm64" + # effect: "NoSchedule" + # type: "repository" # can be either 'organization' or 'repository' + # dind_enabled: false # If `true`, a Docker sidecar container will be deployed + # # To run Docker in Docker (dind), change image to summerwind/actions-runner-dind + # # If not running Docker, change image to summerwind/actions-runner use a smaller image + # image: summerwind/actions-runner-dind + # # `scope` is org name for Organization runners, repo name for Repository runners + # scope: "org/infra" + # group: "ArmRunners" + # # Tell Karpenter not to evict this pod while it is running a job. + # # If we do not set this, Karpenter will feel free to terminate the runner while it is running a job, + # # as part of its consolidation efforts, even when using "on demand" instances. + # running_pod_annotations: + # karpenter.sh/do-not-disrupt: "true" + # min_replicas: 0 # Set to so that no ARM instance is running idle, set to 1 for faster startups + # max_replicas: 20 + # scale_down_delay_seconds: 100 + # resources: + # limits: + # cpu: 200m + # memory: 512Mi + # requests: + # cpu: 100m + # memory: 128Mi + # webhook_driven_scaling_enabled: true + # max_duration: "90m" + # pull_driven_scaling_enabled: false + # # Labels are not case-sensitive to GitHub, but *are* case-sensitive + # # to the webhook based autoscaler, which requires exact matches + # # between the `runs-on:` label in the workflow and the runner labels. + # # Leave "common" off the list so that "common" jobs are always + # # scheduled on the amd64 runners. This is because the webhook + # # based autoscaler will not scale a runner pool if the + # # `runs-on:` labels in the workflow match more than one pool. + # labels: + # - "Linux" + # - "linux" + # - "Ubuntu" + # - "ubuntu" + # - "amd64" + # - "AMD64" + # - "core-auto" ``` ### Generating Required Secrets @@ -77,25 +199,27 @@ AWS SSM is used to store and retrieve secrets. Decide on the SSM path for the GitHub secret (PAT or Application private key) and GitHub webhook secret. -Since the secret is automatically scoped by AWS to the account and region where the secret is stored, -we recommend the secret be stored at `/github_runners/controller_github_app_secret` unless you -plan on running multiple instances of the controller. If you plan on running multiple instances of the controller, -and want to give them different access (otherwise they could share the same secret), then you can add -a path component to the SSM path. For example `/github_runners/cicd/controller_github_app_secret`. +Since the secret is automatically scoped by AWS to the account and region where the secret is stored, we recommend the +secret be stored at `/github_runners/controller_github_app_secret` unless you plan on running multiple instances of the +controller. If you plan on running multiple instances of the controller, and want to give them different access +(otherwise they could share the same secret), then you can add a path component to the SSM path. For example +`/github_runners/cicd/controller_github_app_secret`. ``` ssm_github_secret_path: "/github_runners/controller_github_app_secret" ``` -The preferred way to authenticate is by _creating_ and _installing_ a GitHub App. 
-This is the recommended approach as it allows for more much more restricted access than using a personal access token, -at least until [fine-grained personal access token permissions](https://github.blog/2022-10-18-introducing-fine-grained-personal-access-tokens-for-github/) are generally available. -Follow the instructions [here](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/docs/detailed-docs.md#deploying-using-github-app-authentication) to create and install the GitHub App. +The preferred way to authenticate is by _creating_ and _installing_ a GitHub App. This is the recommended approach as it +allows for more much more restricted access than using a personal access token, at least until +[fine-grained personal access token permissions](https://github.blog/2022-10-18-introducing-fine-grained-personal-access-tokens-for-github/) +are generally available. Follow the instructions +[here](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/docs/detailed-docs.md#deploying-using-github-app-authentication) +to create and install the GitHub App. -At the creation stage, you will be asked to generate a private key. This is the private key that will be used to authenticate -the Action Runner Controller. Download the file and store the contents in SSM using the following command, adjusting the profile -and file name. The profile should be the `admin` role in the account to which you are deploying the runner controller. -The file name should be the name of the private key file you downloaded. +At the creation stage, you will be asked to generate a private key. This is the private key that will be used to +authenticate the Action Runner Controller. Download the file and store the contents in SSM using the following command, +adjusting the profile and file name. The profile should be the `admin` role in the account to which you are deploying +the runner controller. The file name should be the name of the private key file you downloaded. ``` AWS_PROFILE=acme-mgmt-use2-auto-admin chamber write github_runners controller_github_app_secret -- "$(cat APP_NAME.DATE.private-key.pem)" @@ -107,15 +231,15 @@ You can verify the file was correctly written to SSM by matching the private key AWS_PROFILE=acme-mgmt-use2-auto-admin chamber read -q github_runners controller_github_app_secret | openssl rsa -in - -pubout -outform DER | openssl sha256 -binary | openssl base64 ``` -At this stage, record the Application ID and the private key fingerprint in your secrets manager (e.g. 1Password). -You will need the Application ID to configure the runner controller, and want the fingerprint to verify the private key. +At this stage, record the Application ID and the private key fingerprint in your secrets manager (e.g. 1Password). You +will need the Application ID to configure the runner controller, and want the fingerprint to verify the private key. -Proceed to install the GitHub App in the organization or repository you want to use the runner controller for, -and record the Installation ID (the final numeric part of the URL, as explained in the instructions -linked above) in your secrets manager. You will need the Installation ID to configure the runner controller. +Proceed to install the GitHub App in the organization or repository you want to use the runner controller for, and +record the Installation ID (the final numeric part of the URL, as explained in the instructions linked above) in your +secrets manager. 
You will need the Installation ID to configure the runner controller. -In your stack configuration, set the following variables, making sure to quote the values so they are -treated as strings, not numbers. +In your stack configuration, set the following variables, making sure to quote the values so they are treated as +strings, not numbers. ``` github_app_id: "12345" @@ -123,33 +247,111 @@ github_app_installation_id: "12345" ``` OR (obsolete) -- A PAT with the scope outlined in [this document](https://github.com/actions-runner-controller/actions-runner-controller#deploying-using-pat-authentication). - Save this to the value specified by `ssm_github_token_path` using the following command, adjusting the - AWS_PROFILE to refer to the `admin` role in the account to which you are deploying the runner controller: + +- A PAT with the scope outlined in + [this document](https://github.com/actions-runner-controller/actions-runner-controller#deploying-using-pat-authentication). + Save this to the value specified by `ssm_github_token_path` using the following command, adjusting the AWS_PROFILE to + refer to the `admin` role in the account to which you are deploying the runner controller: ``` AWS_PROFILE=acme-mgmt-use2-auto-admin chamber write github_runners controller_github_app_secret -- "" ``` -2. If using the Webhook Driven autoscaling (recommended), generate a random string to use as the Secret when creating the webhook in GitHub. +2. If using the Webhook Driven autoscaling (recommended), generate a random string to use as the Secret when creating + the webhook in GitHub. Generate the string using 1Password (no special characters, length 45) or by running + ```bash dd if=/dev/random bs=1 count=33 2>/dev/null | base64 ``` Store this key in AWS SSM under the same path specified by `ssm_github_webhook_secret_token_path` + ``` ssm_github_webhook_secret_token_path: "/github_runners/github_webhook_secret" ``` -### Using Webhook Driven Autoscaling +### Dockerhub Authentication + +Authenticating with Dockerhub is optional but when enabled can ensure stability by increasing the number of pulls +allowed from your runners. + +To get started set `docker_config_json_enabled` to `true` and `ssm_docker_config_json_path` to the SSM path where the +credentials are stored, for example `github_runners/docker`. + +To create the credentials file, fill out a JSON file locally with the following content: + +```json +{ + "auths": { + "https://index.docker.io/v1/": { + "username": "your_username", + "password": "your_password", + "email": "your_email", + "auth": "$(echo "your_username: your_password" | base64)" + } + } +} +``` + +Then write the file to SSM with the following Atmos Workflow: + +```yaml +save/docker-config-json: + description: Prompt for uploading Docker Config JSON to the AWS SSM Parameter Store + steps: + - type: shell + command: |- + echo "Please enter the Docker Config JSON file path" + echo "See https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry for information on how to create the file" + read -p "Docker Config JSON file path: " -r DOCKER_CONFIG_JSON_FILE_PATH + if [ -z "DOCKER_CONFIG_JSON_FILE_PATH" ] + then + echo 'Inputs cannot be blank please try again!' 
+ exit 0 + fi + + DOCKER_CONFIG_JSON=$(<$DOCKER_CONFIG_JSON_FILE_PATH); + ENCODED_DOCKER_CONFIG_JSON=$(echo "$DOCKER_CONFIG_JSON" | base64 -w 0 ); + + echo $DOCKER_CONFIG_JSON + echo $ENCODED_DOCKER_CONFIG_JSON + + AWS_PROFILE=acme-core-gbl-auto-admin + + set -e + + chamber write github_runners/docker config-json -- "$ENCODED_DOCKER_CONFIG_JSON" + + echo 'Saved Docker Config JSON to the AWS SSM Parameter Store' +``` + +Don't forget to update the AWS Profile in the script. + +### Using Runner Groups -To use the Webhook Driven autoscaling, you must also install the GitHub organization-level webhook after deploying the component -(specifically, the webhook server). The URL for the webhook is determined by the `webhook.hostname_template` and where -it is deployed. Recommended URL is `https://gha-webhook.[environment].[stage].[tenant].[service-discovery-domain]`. +GitHub supports grouping runners into distinct +[Runner Groups](https://docs.github.com/en/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups), +which allow you to have different access controls for different runners. Read the linked documentation about creating +and configuring Runner Groups, which you must do through the GitHub Web UI. If you choose to create Runner Groups, you +can assign one or more Runner pools (from the `runners` map) to groups (only one group per runner pool) by including +`group: ` in the runner configuration. We recommend including it immediately after `scope`. + +### Using Webhook Driven Autoscaling (recommended) + +We recommend using Webhook Driven Autoscaling until GitHub's own autoscaling solution is as capable as the Summerwind +solution this component deploys. See +[this discussion](https://github.com/actions/actions-runner-controller/discussions/3340) for some perspective on why the +Summerwind solution is currently (summer 2024) considered superior. + +To use the Webhook Driven Autoscaling, in addition to setting `webhook_driven_scaling_enabled` to `true`, you must also +install the GitHub organization-level webhook after deploying the component (specifically, the webhook server). The URL +for the webhook is determined by the `webhook.hostname_template` and where it is deployed. Recommended URL is +`https://gha-webhook.[environment].[stage].[tenant].[service-discovery-domain]`. As a GitHub organization admin, go to `https://github.com/organizations/[organization]/settings/hooks`, and then: + - Click"Add webhook" and create a new webhook with the following settings: - Payload URL: copy from Terraform output `webhook_payload_url` - Content type: `application/json` @@ -161,27 +363,134 @@ As a GitHub organization admin, go to `https://github.com/organizations/[organiz - Ensure that "Active" is checked (should be checked by default) - Click "Add webhook" at the bottom of the settings page -After the webhook is created, select "edit" for the webhook and go to the "Recent Deliveries" tab and verify that there is a delivery -(of a "ping" event) with a green check mark. If not, verify all the settings and consult -the logs of the `actions-runner-controller-github-webhook-server` pod. +After the webhook is created, select "edit" for the webhook and go to the "Recent Deliveries" tab and verify that there +is a delivery (of a "ping" event) with a green check mark. If not, verify all the settings and consult the logs of the +`actions-runner-controller-github-webhook-server` pod. 
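+
+The webhook-driven autoscaler matches queued jobs to runner pools by comparing the `runs-on:` labels in the workflow
+with the runner labels. As noted in the catalog example above, the match is case-sensitive and must resolve to exactly
+one pool. As a sanity check on the workflow side, here is a minimal sketch (the workflow name, job, and step are
+illustrative, and it assumes the `infra-runner` label set from the example above):
+
+```yaml
+name: ci
+on: [push]
+
+jobs:
+  build:
+    # These labels must exactly match the labels on one (and only one) runner pool,
+    # e.g. the "infra-runner" pool above; otherwise the webhook-driven autoscaler
+    # will not scale that pool up for this job.
+    runs-on: ["self-hosted", "Linux", "core-auto"]
+    steps:
+      - run: echo "Running on a self-hosted actions-runner-controller runner"
+```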
+ +### Configuring Webhook Driven Autoscaling + +The `HorizontalRunnerAutoscaler scaleUpTriggers.duration` (see [Webhook Driven Scaling documentation](https://github. +com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#webhook-driven-scaling)) is +controlled by the `max_duration` setting for each Runner. The purpose of this timeout is to ensure, in case a job +cancellation or termination event gets missed, that the resulting idle runner eventually gets terminated. + +#### How the Autoscaler Determines the Desired Runner Pool Size + +When a job is queued, a `capacityReservation` is created for it. The HRA (Horizontal Runner Autoscaler) sums up all the +capacity reservations to calculate the desired size of the runner pool, subject to the limits of `minReplicas` and +`maxReplicas`. The idea is that a `capacityReservation` is deleted when a job is completed or canceled, and the pool +size will be equal to `jobsStarted - jobsFinished`. However, it can happen that a job will finish without the HRA being +successfully notified about it, so as a safety measure, the `capacityReservation` will expire after a configurable +amount of time, at which point it will be deleted without regard to the job being finished. This ensures that eventually +an idle runner pool will scale down to `minReplicas`. + +If it happens that the capacity reservation expires before the job is finished, the Horizontal Runner Autoscaler (HRA) +will scale down the pool by 2 instead of 1: once because the capacity reservation expired, and once because the job +finished. This will also cause starvation of waiting jobs, because the next in line will have its timeout timer started +but will not actually start running because no runner is available. And if `minReplicas` is set to zero, the pool will +scale down to zero before finishing all the jobs, leaving some waiting indefinitely. This is why it is important to set +the `max_duration` to a time long enough to cover the full time a job may have to wait between the time it is queued and +the time it finishes, assuming that the HRA scales up the pool by 1 and runs the job on the new runner. + +> [!TIP] +> +> If there are more jobs queued than there are runners allowed by `maxReplicas`, the timeout timer does not start on the +> capacity reservation until enough reservations ahead of it are removed for it to be considered as representing and +> active job. Although there are some edge cases regarding `max_duration` that seem not to be covered properly (see +> [actions-runner-controller issue #2466](https://github.com/actions/actions-runner-controller/issues/2466)), they only +> merit adding a few extra minutes to the timeout. + +### Recommended `max_duration` Duration + +#### Consequences of Too Short of a `max_duration` Duration + +If you set `max_duration` to too short a duration, the Horizontal Runner Autoscaler will cancel capacity reservations +for jobs that have not yet finished, and the pool will become too small. This will be most serious if you have set +`minReplicas = 0` because in this case, jobs will be left in the queue indefinitely. With a higher value of +`minReplicas`, the pool will eventually make it through all the queued jobs, but not as quickly as intended due to the +incorrectly reduced capacity. 
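+
+For reference, the relationship between `max_duration` and the capacity reservations described above looks roughly like
+this in the `HorizontalRunnerAutoscaler` rendered by the bundled chart (an abbreviated sketch, assuming the example
+values `min_replicas: 0`, `max_replicas: 20`, and `max_duration: "90m"`; metadata and the scale target are omitted):
+
+```yaml
+apiVersion: actions.summerwind.dev/v1alpha1
+kind: HorizontalRunnerAutoscaler
+spec:
+  minReplicas: 0
+  maxReplicas: 20
+  scaleUpTriggers:
+    - githubEvent:
+        workflowJob: {}
+      amount: 1
+      # `max_duration` becomes this duration: each queued job adds a capacityReservation
+      # of 1 replica, and the reservation expires after this long if the
+      # "job completed" webhook event is never received.
+      duration: "90m"
+```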
+ +#### Consequences of Too Long of a `max_duration` Duration -Useful Reference +If the Horizontal Runner Autoscaler misses a scale-down event (which can happen because events do not have delivery +guarantees), a runner may be left running idly for as long as the `max_duration` duration. The only problem with this is +the added expense of leaving the idle runner running. + +#### Recommendation + +As a result, we recommend setting `max_duration` to a period long enough to cover: + +- The time it takes for the HRA to scale up the pool and make a new runner available +- The time it takes for the runner to pick up the job from GitHub +- The time it takes for the job to start running on the new runner +- The maximum time a job might take + +Because the consequences of expiring a capacity reservation before the job is finished can be severe, we recommend +setting `max_duration` to a period at least 30 minutes longer than you expect the longest job to take. Remember, when +everything works properly, the HRA will scale down the pool as jobs finish, so there is little cost to setting a long +duration, and the cost looks even smaller by comparison to the cost of having too short a duration. + +For lightly used runner pools expecting only short jobs, you can set `max_duration` to `"30m"`. As a rule of thumb, we +recommend setting `maxReplicas` high enough that jobs never wait on the queue more than an hour. + +### Interaction with Karpenter or other EKS autoscaling solutions + +Kubernetes cluster autoscaling solutions generally expect that a Pod runs a service that can be terminated on one Node +and restarted on another with only a short duration needed to finish processing any in-flight requests. When the cluster +is resized, the cluster autoscaler will do just that. However, GitHub Action Runner Jobs do not fit this model. If a Pod +is terminated in the middle of a job, the job is lost. The likelihood of this happening is increased by the fact that +the Action Runner Controller Autoscaler is expanding and contracting the size of the Runner Pool on a regular basis, +causing the cluster autoscaler to more frequently want to scale up or scale down the EKS cluster, and, consequently, to +move Pods around. + +To handle these kinds of situations, Karpenter respects an annotation on the Pod: + +```yaml +spec: + template: + metadata: + annotations: + karpenter.sh/do-not-disrupt: "true" +``` + +When you set this annotation on the Pod, Karpenter will not evict it. This means that the Pod will stay on the Node it +is on, and the Node it is on will not be considered for eviction. This is good because it means that the Pod will not be +terminated in the middle of a job. However, it also means that the Node the Pod is on will not be considered for +termination, which means that the Node will not be removed from the cluster, which means that the cluster will not +shrink in size when you would like it to. + +Since the Runner Pods terminate at the end of the job, this is not a problem for the Pods actually running jobs. +However, if you have set `minReplicas > 0`, then you have some Pods that are just idling, waiting for jobs to be +assigned to them. These Pods are exactly the kind of Pods you want terminated and moved when the cluster is +underutilized. Therefore, when you set `minReplicas > 0`, you should **NOT** set `karpenter.sh/do-not-evict: "true"` on +the Pod via the `pod_annotations` attribute of the `runners` input. (**But wait**, _there is good news_!) 
+ +We have [requested a feature](https://github.com/actions/actions-runner-controller/issues/2562) that will allow you to +set `karpenter.sh/do-not-disrupt: "true"` and `minReplicas > 0` at the same time by only annotating Pods running jobs. +Meanwhile, **we have implemented this for you** using a job startup hook. This hook will set annotations on the Pod when +the job starts. When the job finishes, the Pod will be deleted by the controller, so the annotations will not need to be +removed. Configure annotations that apply only to Pods running jobs in the `running_pod_annotations` attribute of the +`runners` input. ### Updating CRDs When updating the chart or application version of `actions-runner-controller`, it is possible you will need to install -new CRDs. Such a requirement should be indicated in the `actions-runner-controller` release notes and may require some adjustment to our -custom chart or configuration. +new CRDs. Such a requirement should be indicated in the `actions-runner-controller` release notes and may require some +adjustment to our custom chart or configuration. -This component uses `helm` to manage the deployment, and `helm` will not auto-update CRDs. -If new CRDs are needed, install them manually via a command like +This component uses `helm` to manage the deployment, and `helm` will not auto-update CRDs. If new CRDs are needed, +install them manually via a command like ``` kubectl create -f https://raw.githubusercontent.com/actions-runner-controller/actions-runner-controller/master/charts/actions-runner-controller/crds/actions.summerwind.dev_horizontalrunnerautoscalers.yaml ``` -Consult [actions-runner-controller](https://github.com/actions-runner-controller/actions-runner-controller) documentation for further details. +### Useful Reference + +Consult [actions-runner-controller](https://github.com/actions-runner-controller/actions-runner-controller) +documentation for further details. 
+ ## Requirements @@ -190,7 +499,7 @@ Consult [actions-runner-controller](https://github.com/actions-runner-controller | [terraform](#requirement\_terraform) | >= 1.3.0 | | [aws](#requirement\_aws) | >= 4.9.0 | | [helm](#requirement\_helm) | >= 2.0 | -| [kubernetes](#requirement\_kubernetes) | >= 2.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0, != 2.21.0 | ## Providers @@ -202,9 +511,9 @@ Consult [actions-runner-controller](https://github.com/actions-runner-controller | Name | Source | Version | |------|--------|---------| -| [actions\_runner](#module\_actions\_runner) | cloudposse/helm-release/aws | 0.7.0 | -| [actions\_runner\_controller](#module\_actions\_runner\_controller) | cloudposse/helm-release/aws | 0.7.0 | -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.3.1 | +| [actions\_runner](#module\_actions\_runner) | cloudposse/helm-release/aws | 0.10.1 | +| [actions\_runner\_controller](#module\_actions\_runner\_controller) | cloudposse/helm-release/aws | 0.10.1 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -213,6 +522,7 @@ Consult [actions-runner-controller](https://github.com/actions-runner-controller | Name | Type | |------|------| | [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [aws_ssm_parameter.docker_config_json](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | | [aws_ssm_parameter.github_token](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | | [aws_ssm_parameter.github_webhook_secret_token](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | @@ -230,26 +540,28 @@ Consult [actions-runner-controller](https://github.com/actions-runner-controller | [chart\_version](#input\_chart\_version) | Specify the exact chart version to install. If this is not specified, the latest version is installed. | `string` | `null` | no | | [cleanup\_on\_fail](#input\_cleanup\_on\_fail) | Allow deletion of new resources created in this upgrade when upgrade fails. | `bool` | `true` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [context\_tags\_enabled](#input\_context\_tags\_enabled) | Whether or not to include all context tags as labels for each runner | `bool` | `false` | no | +| [controller\_replica\_count](#input\_controller\_replica\_count) | The number of replicas of the runner-controller to run. | `number` | `2` | no | | [create\_namespace](#input\_create\_namespace) | Create the namespace if it does not yet exist. Defaults to `false`. | `bool` | `null` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [docker\_config\_json\_enabled](#input\_docker\_config\_json\_enabled) | Whether the Docker config JSON is enabled | `bool` | `false` | no | | [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [existing\_kubernetes\_secret\_name](#input\_existing\_kubernetes\_secret\_name) | If you are going to create the Kubernetes Secret the runner-controller will use
by some means (such as SOPS) outside of this component, set the name of the secret
here and it will be used. In this case, this component will not create a secret
and you can leave the secret-related inputs with their default (empty) values.
The same secret will be used by both the runner-controller and the webhook-server. | `string` | `""` | no | | [github\_app\_id](#input\_github\_app\_id) | The ID of the GitHub App to use for the runner controller. | `string` | `""` | no | | [github\_app\_installation\_id](#input\_github\_app\_installation\_id) | The "Installation ID" of the GitHub App to use for the runner controller. | `string` | `""` | no | -| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `true` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | | [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | | [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | | [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | -| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | | [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | | [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | | [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | @@ -264,8 +576,9 @@ Consult [actions-runner-controller](https://github.com/actions-runner-controller | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region. | `string` | n/a | yes | | [resources](#input\_resources) | The cpu and memory of the deployment's limits and requests. |
object({
limits = object({
cpu = string
memory = string
})
requests = object({
cpu = string
memory = string
})
})
| n/a | yes | -| [runners](#input\_runners) | Map of Action Runner configurations, with the key being the name of the runner. Please note that the name must be in
kebab-case.

For example:
hcl
organization_runner = {
type = "organization" # can be either 'organization' or 'repository'
dind_enabled: false # A Docker sidecar container will be deployed
image: summerwind/actions-runner # If dind_enabled=true, set this to 'summerwind/actions-runner-dind'
scope = "ACME" # org name for Organization runners, repo name for Repository runners
scale_down_delay_seconds = 300
min_replicas = 1
max_replicas = 5
busy_metrics = {
scale_up_threshold = 0.75
scale_down_threshold = 0.25
scale_up_factor = 2
scale_down_factor = 0.5
}
labels = [
"Ubuntu",
"core-automation",
]
}
|
map(object({
type = string
scope = string
image = optional(string, "")
dind_enabled = bool
scale_down_delay_seconds = number
min_replicas = number
max_replicas = number
busy_metrics = optional(object({
scale_up_threshold = string
scale_down_threshold = string
scale_up_adjustment = optional(string)
scale_down_adjustment = optional(string)
scale_up_factor = optional(string)
scale_down_factor = optional(string)
}))
webhook_driven_scaling_enabled = bool
pull_driven_scaling_enabled = bool
labels = list(string)
storage = optional(string, "")
resources = object({
limits = object({
cpu = string
memory = string
ephemeral_storage = optional(string, "")
})
requests = object({
cpu = string
memory = string
})
})
}))
| n/a | yes | +| [runners](#input\_runners) | Map of Action Runner configurations, with the key being the name of the runner. Please note that the name must be in
kebab-case.

For example:
hcl
organization_runner = {
type = "organization" # can be either 'organization' or 'repository'
dind_enabled: true # A Docker daemon will be started in the runner Pod
image: summerwind/actions-runner-dind # If dind_enabled=false, set this to 'summerwind/actions-runner'
scope = "ACME" # org name for Organization runners, repo name for Repository runners
group = "core-automation" # Optional. Assigns the runners to a runner group, for access control.
scale_down_delay_seconds = 300
min_replicas = 1
max_replicas = 5
labels = [
"Ubuntu",
"core-automation",
]
}
|
map(object({
type = string
scope = string
group = optional(string, null)
image = optional(string, "summerwind/actions-runner-dind")
auto_update_enabled = optional(bool, true)
dind_enabled = optional(bool, true)
node_selector = optional(map(string), {})
pod_annotations = optional(map(string), {})

# running_pod_annotations are only applied to the pods once they start running a job
running_pod_annotations = optional(map(string), {})

# affinity is too complex to model. Whatever you assigned affinity will be copied
# to the runner Pod spec.
affinity = optional(any)

tolerations = optional(list(object({
key = string
operator = string
value = optional(string, null)
effect = string
})), [])
scale_down_delay_seconds = optional(number, 300)
min_replicas = number
max_replicas = number
# Scheduled overrides. See https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#scheduled-overrides
# Order is important. The earlier entry is prioritized higher than later entries. So you usually define
# one-time overrides at the top of your list, then yearly, monthly, weekly, and lastly daily overrides.
scheduled_overrides = optional(list(object({
start_time = string # ISO 8601 format, eg, "2021-06-01T00:00:00+09:00"
end_time = string # ISO 8601 format, eg, "2021-06-01T00:00:00+09:00"
min_replicas = optional(number)
max_replicas = optional(number)
recurrence_rule = optional(object({
frequency = string # One of Daily, Weekly, Monthly, Yearly
until_time = optional(string) # ISO 8601 format time after which the schedule will no longer apply
}))
})), [])
busy_metrics = optional(object({
scale_up_threshold = string
scale_down_threshold = string
scale_up_adjustment = optional(string)
scale_down_adjustment = optional(string)
scale_up_factor = optional(string)
scale_down_factor = optional(string)
}))
webhook_driven_scaling_enabled = optional(bool, true)
# max_duration is the duration after which a job will be considered completed,
# even if the webhook has not received a "job completed" event.
# This is to ensure that if an event is missed, it does not leave the runner running forever.
# Set it long enough to cover the longest job you expect to run and then some.
# See https://github.com/actions/actions-runner-controller/blob/9afd93065fa8b1f87296f0dcdf0c2753a0548cb7/docs/automatically-scaling-runners.md?plain=1#L264-L268
# Defaults to 1 hour programmatically (to be able to detect if both max_duration and webhook_startup_timeout are set).
max_duration = optional(string)
# The name `webhook_startup_timeout` was misleading and has been deprecated.
# It has been renamed `max_duration`.
webhook_startup_timeout = optional(string)
# Adjust the time (in seconds) to wait for the Docker in Docker daemon to become responsive.
wait_for_docker_seconds = optional(string, "")
pull_driven_scaling_enabled = optional(bool, false)
labels = optional(list(string), [])
# If not null, `docker_storage` specifies the size (as `go` string) of
# an ephemeral (default storage class) Persistent Volume to allocate for the Docker daemon.
# Takes precedence over `tmpfs_enabled` for the Docker daemon storage.
docker_storage = optional(string, null)
# storage is deprecated in favor of docker_storage, since it is only storage for the Docker daemon
storage = optional(string, null)
# If `pvc_enabled` is true, a Persistent Volume Claim will be created for the runner
# and mounted at /home/runner/work/shared. This is useful for sharing data between runners.
pvc_enabled = optional(bool, false)
# If `tmpfs_enabled` is `true`, both the runner and the docker daemon will use a tmpfs volume,
# meaning that all data will be stored in RAM rather than on disk, bypassing disk I/O limitations,
# but what would have been disk usage is now additional memory usage. You must specify memory
# requests and limits when using tmpfs or else the Pod will likely crash the Node.
tmpfs_enabled = optional(bool)
resources = optional(object({
limits = optional(object({
cpu = optional(string, "1")
memory = optional(string, "1Gi")
# ephemeral-storage is the Kubernetes name, but `ephemeral_storage` is the gomplate name,
# so allow either. If both are specified, `ephemeral-storage` takes precedence.
ephemeral-storage = optional(string)
ephemeral_storage = optional(string, "10Gi")
}), {})
requests = optional(object({
cpu = optional(string, "500m")
memory = optional(string, "256Mi")
# ephemeral-storage is the Kubernetes name, but `ephemeral_storage` is the gomplate name,
# so allow either. If both are specified, `ephemeral-storage` takes precedence.
ephemeral-storage = optional(string)
ephemeral_storage = optional(string, "1Gi")
}), {})
}), {})
}))
| n/a | yes | | [s3\_bucket\_arns](#input\_s3\_bucket\_arns) | List of ARNs of S3 Buckets to which the runners will have read-write access to. | `list(string)` | `[]` | no | +| [ssm\_docker\_config\_json\_path](#input\_ssm\_docker\_config\_json\_path) | SSM path to the Docker config JSON | `string` | `null` | no | | [ssm\_github\_secret\_path](#input\_ssm\_github\_secret\_path) | The path in SSM to the GitHub app private key file contents or GitHub PAT token. | `string` | `""` | no | | [ssm\_github\_webhook\_secret\_token\_path](#input\_ssm\_github\_webhook\_secret\_token\_path) | The path in SSM to the GitHub Webhook Secret token. | `string` | `""` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | @@ -273,7 +586,7 @@ Consult [actions-runner-controller](https://github.com/actions-runner-controller | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | | [timeout](#input\_timeout) | Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds | `number` | `null` | no | | [wait](#input\_wait) | Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`. | `bool` | `null` | no | -| [webhook](#input\_webhook) | Configuration for the GitHub Webhook Server.
`hostname_template` is the `format()` string to use to generate the hostname via `format(var.hostname_template, var.tenant, var.stage, var.environment)`"
Typically something like `"echo.%[3]v.%[2]v.example.com"`. |
object({
enabled = bool
hostname_template = string
})
|
{
"enabled": false,
"hostname_template": null
}
| no | +| [webhook](#input\_webhook) | Configuration for the GitHub Webhook Server.
`hostname_template` is the `format()` string to use to generate the hostname via `format(var.hostname_template, var.tenant, var.stage, var.environment)`"
Typically something like `"echo.%[3]v.%[2]v.example.com"`.
`queue_limit` is the maximum number of webhook events that can be queued up for processing by the autoscaler.
When the queue gets full, webhook events will be dropped (status 500). |
object({
enabled = bool
hostname_template = string
queue_limit = optional(number, 1000)
})
|
{
"enabled": false,
"hostname_template": null,
"queue_limit": 1000
}
| no | ## Outputs @@ -283,10 +596,12 @@ Consult [actions-runner-controller](https://github.com/actions-runner-controller | [metadata\_action\_runner\_releases](#output\_metadata\_action\_runner\_releases) | Block statuses of the deployed actions-runner chart releases | | [webhook\_payload\_url](#output\_webhook\_payload\_url) | Payload URL for GitHub webhook | + ## References -- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/eks/actions-runner-controller) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/eks/actions-runner-controller) - + Cloud Posse's upstream component - [alb-controller](https://artifacthub.io/packages/helm/aws/aws-load-balancer-controller) - Helm Chart - [alb-controller](https://github.com/kubernetes-sigs/aws-load-balancer-controller) - AWS Load Balancer Controller - [actions-runner-controller Webhook Driven Scaling](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/docs/detailed-docs.md#webhook-driven-scaling) diff --git a/modules/eks/actions-runner-controller/charts/actions-runner/Chart.yaml b/modules/eks/actions-runner-controller/charts/actions-runner/Chart.yaml index 8aea5901e..1bfa1968d 100644 --- a/modules/eks/actions-runner-controller/charts/actions-runner/Chart.yaml +++ b/modules/eks/actions-runner-controller/charts/actions-runner/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 +version: 0.3.2 # This chart only deploys Resources for actions-runner-controller, so app version does not really apply. # We use Resource API version instead. diff --git a/modules/eks/actions-runner-controller/charts/actions-runner/templates/horizontalrunnerautoscaler.yaml b/modules/eks/actions-runner-controller/charts/actions-runner/templates/horizontalrunnerautoscaler.yaml index b6db4c843..eda4813a7 100644 --- a/modules/eks/actions-runner-controller/charts/actions-runner/templates/horizontalrunnerautoscaler.yaml +++ b/modules/eks/actions-runner-controller/charts/actions-runner/templates/horizontalrunnerautoscaler.yaml @@ -10,6 +10,27 @@ spec: name: {{ .Values.release_name }} minReplicas: {{ .Values.min_replicas }} maxReplicas: {{ .Values.max_replicas }} + {{- with .Values.scheduled_overrides }} + scheduledOverrides: + {{- range . }} + - startTime: "{{ .start_time }}" + endTime: "{{ .end_time }}" + {{- with .recurrence_rule }} + recurrenceRule: + frequency: {{ .frequency }} + {{- if .until_time }} + untilTime: "{{ .until_time }}" + {{- end }} + {{- end }} + {{- with .min_replicas }} + minReplicas: {{ . }} + {{- end }} + {{- with .max_replicas }} + maxReplicas: {{ . 
}} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.pull_driven_scaling_enabled }} metrics: - type: PercentageRunnersBusy @@ -31,5 +52,7 @@ spec: - githubEvent: workflowJob: {} amount: 1 - duration: "{{ .Values.scale_down_delay_seconds }}s" + {{- if .Values.max_duration }} + duration: "{{ .Values.max_duration }}" + {{- end }} {{- end }} diff --git a/modules/eks/actions-runner-controller/charts/actions-runner/templates/runnerdeployment.yaml b/modules/eks/actions-runner-controller/charts/actions-runner/templates/runnerdeployment.yaml index 600364cab..97382feda 100644 --- a/modules/eks/actions-runner-controller/charts/actions-runner/templates/runnerdeployment.yaml +++ b/modules/eks/actions-runner-controller/charts/actions-runner/templates/runnerdeployment.yaml @@ -1,13 +1,159 @@ +{{- $release_name := .Values.release_name }} +{{- /* To avoid the situation where a value evaluates to +a string value of "false", which has a boolean value of true, +we explicitly convert to boolean based on the string value */}} +{{- $use_tmpfs := eq (printf "%v" .Values.tmpfs_enabled) "true" }} +{{- $use_pvc := eq (printf "%v" .Values.pvc_enabled) "true" }} +{{- $use_dockerconfig := eq (printf "%v" .Values.docker_config_json_enabled) "true" }} +{{- $use_dind := eq (printf "%v" .Values.dind_enabled) "true" }} +{{- /* Historically, the docker daemon was run in a sidecar. + At some point, the option became available to use dockerdWithinRunnerContainer, + and we now default to that. In fact, at this moment, the sidecar option is not configurable. + We keep the logic here in case we need to revert to the sidecar option. */}} +{{- $use_dind_in_runner := $use_dind }} +{{- if $use_pvc }} +--- +# Persistent Volumes can be used for image caching +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ $release_name }} +spec: + accessModes: + - ReadWriteMany + # StorageClassName comes from efs-controller and must be deployed first. + storageClassName: efs-sc + resources: + requests: + # EFS is not actually storage constrained, but this storage request is + # required. 100Gi is a ballpark for how much we initially request, but this + # may grow. We are responsible for docker pruning this periodically to + # save space. + storage: 100Gi +{{- end }} +{{- if $use_dockerconfig }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $release_name }}-regcred +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ .Values.docker_config_json }} +{{- end }} +{{- with .Values.running_pod_annotations }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ $release_name }}-runner-hooks +data: + annotate.sh: | + #!/bin/bash + + # If we had kubectl and a KUBECONFIG, we could do this: + # kubectl annotate pod $HOSTNAME 'karpenter.sh/do-not-evict="true"' --overwrite + # kubectl annotate pod $HOSTNAME 'karpenter.sh/do-not-disrupt="true"' --overwrite + + # This is the same thing, the hard way + + # Metadata about the pod + NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace) + POD_NAME=$(hostname) + + # Kubernetes API URL + API_URL="https://kubernetes.default.svc" + + # Read the service account token + TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) + + # Content type + CONTENT_TYPE="application/merge-patch+json" + + PATCH_JSON=$(cat <<'EOF' + { + "metadata": { + "annotations": + {{- . 
| toJson | nindent 10 }} + } + } + EOF + ) + + # Use curl to patch the pod + curl -sSk -X PATCH \ + -H "Authorization: Bearer $TOKEN" \ + -H "Content-Type: $CONTENT_TYPE" \ + -H "Accept: application/json" \ + -d "$PATCH_JSON" \ + "$API_URL/api/v1/namespaces/$NAMESPACE/pods/$POD_NAME" | jq .metadata.annotations \ + && AT=$(date -u +"%Y-%m-%dT%H:%M:%S.%3Nz") || code=$? + + if [ -z "$AT" ]; then + echo "Failed (curl exited with status ${code}) to annotate pod with annotations:\n '%s'\n" '{{ . | toJson }}' + exit $code + else + printf "Annotated pod at %s with annotations:\n '%s'\n" "$AT" '{{ . | toJson }}' + fi + +{{ end }} +--- apiVersion: actions.summerwind.dev/v1alpha1 kind: RunnerDeployment metadata: - name: {{ .Values.release_name }} + name: {{ $release_name }} spec: # Do not use `replicas` with HorizontalRunnerAutoscaler # See https://github.com/actions-runner-controller/actions-runner-controller/issues/206#issuecomment-748601907 # replicas: 1 template: + {{- with .Values.pod_annotations }} + metadata: + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} spec: + {{- if $use_dockerconfig }} + # secrets volumeMount are always mounted readOnly so config.json has to be copied to the correct directory + # https://github.com/kubernetes/kubernetes/issues/62099 + # https://github.com/actions/actions-runner-controller/issues/2123#issuecomment-1527077517 + + initContainers: + - name: docker-config-writer + image: {{ .Values.image | quote }} + command: [ "sh", "-c", "cat /home/.docker/config.json > /home/runner/.docker/config.json" ] + volumeMounts: + - mountPath: /home/.docker/ + name: docker-secret + - mountPath: /home/runner/.docker + name: docker-config-volume + {{- end }} + + # As of 2023-03-31 + # Recommended by https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md + terminationGracePeriodSeconds: 100 + env: + # RUNNER_GRACEFUL_STOP_TIMEOUT is the time the runner will give itself to try to finish + # a job before it gracefully cancels itself in response to a pod termination signal. + # It should be less than the terminationGracePeriodSeconds above so that it has time + # to report its status and deregister itself from the runner pool. + - name: RUNNER_GRACEFUL_STOP_TIMEOUT + value: "80" + - name: DISABLE_RUNNER_UPDATE + value: "{{ printf "%v" (not .Values.auto_update_enabled) }}" + {{- with .Values.wait_for_docker_seconds }} + # If Docker is taking too long to start (which is likely due to some other performance issue), + # increase the timeout from the default of 120 seconds. + - name: WAIT_FOR_DOCKER_SECONDS + value: "{{ . }}" + {{- end }} + {{- if $use_tmpfs }} + - name: RUNNER_HOME + value: "/runner-tmpfs" + - name: RUNNER_WORKDIR + value: "/runner-tmpfs/_work" + {{- end }} + # You could reserve nodes for runners by labeling and tainting nodes with # node-role.kubernetes.io/actions-runner # and then adding the following to this RunnerDeployment @@ -19,13 +165,43 @@ spec: # - effect: NoSchedule # key: node-role.kubernetes.io/actions-runner # operator: Exists + {{- with .Values.node_selector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . 
| nindent 8 }} + {{- end }} - {{ if eq .Values.type "organization" }} + {{- with .Values.running_pod_annotations }} + # Run a pre-run hook to set pod annotations + # See https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/running-scripts-before-or-after-a-job#triggering-the-scripts + containers: + - name: runner + # ARC (Summerwind) has its own pre-run hook, so we do not want to set + # env: + # - name: ACTIONS_RUNNER_HOOK_JOB_STARTED + # value: /hooks/pre-run.sh # triggers when a job is started, and sets the pod to NOT safe-to-evict + # Instead, its pre-run hook runs scripts in /etc/arc/hooks/job-started.d/ + volumeMounts: + - name: hooks + mountPath: /etc/arc/hooks/job-started.d/ + {{- end }} + + {{- if eq .Values.type "organization" }} organization: {{ .Values.scope }} - {{ end }} - {{ if eq .Values.type "repository" }} + {{- end }} + {{- if eq .Values.type "repository" }} repository: {{ .Values.scope }} - {{ end }} + {{- end }} + {{- if index .Values "group" }} + group: {{ .Values.group }} + {{- end }} # You can use labels to create subsets of runners. # See https://github.com/summerwind/actions-runner-controller#runner-labels # and https://docs.github.com/en/free-pro-team@latest/actions/hosting-your-own-runners/using-self-hosted-runners-in-a-workflow @@ -34,35 +210,113 @@ spec: # to explicitly include the "self-hosted" label in order to match the # workflow_job to it. - self-hosted - {{- range .Values.labels }} + {{- range .Values.labels }} - {{ . | quote }} - {{- end }} + {{- end }} # dockerdWithinRunnerContainer = false means access to a Docker daemon is provided by a sidecar container. - dockerdWithinRunnerContainer: {{ .Values.dind_enabled }} + dockerdWithinRunnerContainer: {{ $use_dind_in_runner }} + dockerEnabled: {{ $use_dind }} image: {{ .Values.image | quote }} imagePullPolicy: IfNotPresent + {{- if $use_dockerconfig }} + imagePullSecrets: + - name: {{ $release_name }}-regcred + {{- end }} serviceAccountName: {{ .Values.service_account_name }} resources: limits: cpu: {{ .Values.resources.limits.cpu }} memory: {{ .Values.resources.limits.memory }} - {{- if and .Values.dind_enabled .Values.resources.limits.ephemeral_storage }} + {{- if index .Values.resources.limits "ephemeral-storage" }} + ephemeral-storage: {{ index .Values.resources.limits "ephemeral-storage" }} + {{- else }} + {{- if index .Values.resources.limits "ephemeral_storage" }} ephemeral-storage: {{ .Values.resources.limits.ephemeral_storage }} {{- end }} + {{- end }} requests: cpu: {{ .Values.resources.requests.cpu }} memory: {{ .Values.resources.requests.memory }} - {{- if and .Values.dind_enabled .Values.storage }} + {{- if index .Values.resources.requests "ephemeral-storage" }} + ephemeral-storage: {{ index .Values.resources.requests "ephemeral-storage" }} + {{- else }} + {{- if index .Values.resources.requests "ephemeral_storage" }} + ephemeral-storage: {{ .Values.resources.requests.ephemeral_storage }} + {{- end }} + {{- end }} + {{- if and (not $use_dind_in_runner) (or .Values.docker_storage $use_tmpfs) }} + {{- /* dockerVolumeMounts are mounted into the docker sidecar, and ignored if running with dockerdWithinRunnerContainer */}} dockerVolumeMounts: - - mountPath: /var/lib/docker - name: docker-volume - volumes: - - name: docker-volume - ephemeral: - volumeClaimTemplate: - spec: - accessModes: [ "ReadWriteOnce" ] # Only 1 pod can connect at a time - resources: - requests: - storage: {{ .Values.storage }} + - mountPath: /var/lib/docker + name: docker-volume {{- end }} + 
{{- if or $use_pvc $use_dockerconfig $use_tmpfs }} + volumeMounts: + {{- if and $use_dind_in_runner (or .Values.docker_storage $use_tmpfs) }} + - mountPath: /var/lib/docker + name: docker-volume + {{- end }} + {{- if $use_pvc }} + - mountPath: /home/runner/work/shared + name: shared-volume + {{- end }} + {{- if $use_dockerconfig }} + - mountPath: /home/.docker/ + name: docker-secret + - mountPath: /home/runner/.docker + name: docker-config-volume + {{- end }} + {{- if $use_tmpfs }} + - mountPath: /tmp + name: tmp + - mountPath: /runner-tmpfs + name: runner-tmpfs + {{- end }} + {{- end }}{{/* End of volumeMounts */}} + {{- if or (and $use_dind (or .Values.docker_storage $use_tmpfs)) $use_pvc $use_dockerconfig (not (empty .Values.running_pod_annotations)) }} + volumes: + {{- if $use_tmpfs }} + - name: runner-tmpfs + emptyDir: + medium: Memory + - name: tmp + emptyDir: + medium: Memory + {{- end }} + {{- if and $use_dind (or .Values.docker_storage $use_tmpfs) }} + - name: docker-volume + {{- if .Values.docker_storage }} + ephemeral: + volumeClaimTemplate: + spec: + accessModes: [ "ReadWriteOnce" ] # Only 1 pod can connect at a time + resources: + requests: + storage: {{ .Values.docker_storage }} + {{- else }} + emptyDir: + medium: Memory + {{- end }} + {{- end }} + {{- if $use_pvc }} + - name: shared-volume + persistentVolumeClaim: + claimName: {{ $release_name }} + {{- end }} + {{- if $use_dockerconfig }} + - name: docker-secret + secret: + secretName: {{ $release_name }}-regcred + items: + - key: .dockerconfigjson + path: config.json + - name: docker-config-volume + emptyDir: + {{- end }} + {{- with .Values.running_pod_annotations }} + - name: hooks + configMap: + name: {{ $release_name }}-runner-hooks + defaultMode: 0755 # Set execute permissions for all files + {{- end }} + {{- end }}{{/* End of volumes */}} diff --git a/modules/eks/actions-runner-controller/charts/actions-runner/values.yaml b/modules/eks/actions-runner-controller/charts/actions-runner/values.yaml index ad87705be..c5c96f3c1 100644 --- a/modules/eks/actions-runner-controller/charts/actions-runner/values.yaml +++ b/modules/eks/actions-runner-controller/charts/actions-runner/values.yaml @@ -2,26 +2,30 @@ type: "repository" # can be either 'organization' or 'repository' dind_enabled: true # If `true`, a Docker sidecar container will be deployed # To run Docker in Docker (dind), change image from summerwind/actions-runner to summerwind/actions-runner-dind image: summerwind/actions-runner-dind -scope: "example/app" -scale_down_delay_seconds: 300 -min_replicas: 1 -max_replicas: 2 -busy_metrics: - scale_up_threshold: 0.75 - scale_down_threshold: 0.25 - scale_up_factor: 2 - scale_down_factor: 0.5 -resources: - limits: - cpu: 1.5 - memory: 4Gi - ephemeral_storage: "10Gi" - requests: - cpu: 0.5 - memory: 1Gi -storage: "10Gi" -webhook_driven_scaling_enabled: false + +#scope: "example/app" +#scale_down_delay_seconds: 300 +#min_replicas: 1 +#max_replicas: 2 +#busy_metrics: +# scale_up_threshold: 0.75 +# scale_down_threshold: 0.25 +# scale_up_factor: 2 +# scale_down_factor: 0.5 +#resources: +# limits: +# cpu: 1 +# memory: 1Gi +# ephemeral_storage: "10Gi" +# requests: +# cpu: 500m +# memory: 512Mi +# ephemeral_storage: "1Gi" + +pvc_enabled: false +webhook_driven_scaling_enabled: true +max_duration: "90m" pull_driven_scaling_enabled: false -labels: - - "Ubuntu" - - "core-example" +#labels: +# - "Ubuntu" +# - "core-example" diff --git a/modules/eks/actions-runner-controller/main.tf b/modules/eks/actions-runner-controller/main.tf index 
cfb6d6309..39684c171 100644 --- a/modules/eks/actions-runner-controller/main.tf +++ b/modules/eks/actions-runner-controller/main.tf @@ -1,8 +1,12 @@ locals { - enabled = module.this.enabled + enabled = module.this.enabled + context_labels = var.context_tags_enabled ? values(module.this.tags) : [] - webhook_enabled = local.enabled ? try(var.webhook.enabled, false) : false - webhook_host = local.webhook_enabled ? format(var.webhook.hostname_template, var.tenant, var.stage, var.environment) : "example.com" + webhook_enabled = local.enabled ? try(var.webhook.enabled, false) : false + webhook_host = local.webhook_enabled ? format(var.webhook.hostname_template, var.tenant, var.stage, var.environment) : "example.com" + runner_groups_enabled = length(compact(values(var.runners)[*].group)) > 0 + docker_config_json_enabled = local.enabled && var.docker_config_json_enabled + docker_config_json = one(data.aws_ssm_parameter.docker_config_json[*].value) github_app_enabled = length(var.github_app_id) > 0 && length(var.github_app_installation_id) > 0 create_secret = local.enabled && length(var.existing_kubernetes_secret_name) == 0 @@ -99,9 +103,15 @@ data "aws_ssm_parameter" "github_webhook_secret_token" { with_decryption = true } +data "aws_ssm_parameter" "docker_config_json" { + count = local.docker_config_json_enabled ? 1 : 0 + name = var.ssm_docker_config_json_path + with_decryption = true +} + module "actions_runner_controller" { source = "cloudposse/helm-release/aws" - version = "0.7.0" + version = "0.10.1" name = "" # avoids hitting length restrictions on IAM Role names chart = var.chart @@ -130,16 +140,22 @@ module "actions_runner_controller" { file("${path.module}/resources/values.yaml"), # standard k8s object settings yamlencode({ - fullnameOverride = module.this.name, + fullnameOverride = module.this.name serviceAccount = { name = module.this.name - }, + } resources = var.resources rbac = { create = var.rbac_enabled } + replicaCount = var.controller_replica_count githubWebhookServer = { - enabled = var.webhook.enabled + enabled = var.webhook.enabled + queueLimit = var.webhook.queue_limit + useRunnerGroupsVisibility = local.runner_groups_enabled + secret = { + create = local.create_secret + } ingress = { enabled = var.webhook.enabled hosts = [ @@ -154,7 +170,7 @@ module "actions_runner_controller" { } ] } - }, + } authSecret = { enabled = true create = local.create_secret @@ -189,7 +205,7 @@ module "actions_runner" { for_each = local.enabled ? var.runners : {} source = "cloudposse/helm-release/aws" - version = "0.7.0" + version = "0.10.1" name = each.key chart = "${path.module}/charts/actions-runner" @@ -203,24 +219,37 @@ module "actions_runner" { values = compact([ yamlencode({ release_name = each.key + pod_annotations = each.value.pod_annotations + running_pod_annotations = each.value.running_pod_annotations service_account_name = module.actions_runner_controller.service_account_name type = each.value.type scope = each.value.scope image = each.value.image + auto_update_enabled = each.value.auto_update_enabled dind_enabled = each.value.dind_enabled service_account_role_arn = module.actions_runner_controller.service_account_role_arn resources = each.value.resources - storage = each.value.storage - labels = each.value.labels + docker_storage = each.value.docker_storage != null ? 
each.value.docker_storage : each.value.storage + labels = concat(each.value.labels, local.context_labels) scale_down_delay_seconds = each.value.scale_down_delay_seconds min_replicas = each.value.min_replicas max_replicas = each.value.max_replicas + scheduled_overrides = each.value.scheduled_overrides webhook_driven_scaling_enabled = each.value.webhook_driven_scaling_enabled + max_duration = coalesce(each.value.webhook_startup_timeout, each.value.max_duration, "1h") + wait_for_docker_seconds = each.value.wait_for_docker_seconds pull_driven_scaling_enabled = each.value.pull_driven_scaling_enabled + pvc_enabled = each.value.pvc_enabled + tmpfs_enabled = each.value.tmpfs_enabled + node_selector = each.value.node_selector + affinity = each.value.affinity + tolerations = each.value.tolerations + docker_config_json_enabled = local.docker_config_json_enabled + docker_config_json = local.docker_config_json }), + each.value.group == null ? "" : yamlencode({ group = each.value.group }), local.busy_metrics_filtered[each.key] == null ? "" : yamlencode(local.busy_metrics_filtered[each.key]), ]) depends_on = [module.actions_runner_controller] } - diff --git a/modules/eks/actions-runner-controller/outputs.tf b/modules/eks/actions-runner-controller/outputs.tf index e96163492..0c6853869 100644 --- a/modules/eks/actions-runner-controller/outputs.tf +++ b/modules/eks/actions-runner-controller/outputs.tf @@ -1,6 +1,29 @@ output "metadata" { value = module.actions_runner_controller.metadata description = "Block status of the deployed release" + + precondition { + condition = length([ + for k, v in var.runners : k if v.webhook_startup_timeout != null && v.max_duration != null + ]) == 0 + error_message = <<-EOT + The input var.runners[runner].webhook_startup_timeout is deprecated and replaced by var.runners[runner].max_duration. + You may not set both values at the same time, but the following runners have both values set: + ${join("\n ", [for k, v in var.runners : k if v.webhook_startup_timeout != null && v.max_duration != null])} + + EOT + } + precondition { + condition = length([ + for k, v in var.runners : k if v.storage != null && v.docker_storage != null + ]) == 0 + error_message = <<-EOT + The input var.runners[runner].storage is deprecated and replaced by var.runners[runner].docker_storage. + You may not set both values at the same time, but the following runners have both values set: + ${join("\n ", [for k, v in var.runners : k if v.storage != null && v.docker_storage != null])} + + EOT + } } output "metadata_action_runner_releases" { @@ -12,4 +35,3 @@ output "webhook_payload_url" { value = local.webhook_enabled ? 
format("https://${var.webhook.hostname_template}", var.tenant, var.stage, var.environment) : null description = "Payload URL for GitHub webhook" } - diff --git a/modules/eks/actions-runner-controller/provider-helm.tf b/modules/eks/actions-runner-controller/provider-helm.tf index 9bb5edb6f..91cc7f6d4 100644 --- a/modules/eks/actions-runner-controller/provider-helm.tf +++ b/modules/eks/actions-runner-controller/provider-helm.tf @@ -21,18 +21,35 @@ variable "kubeconfig_file_enabled" { type = bool default = false description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false } variable "kubeconfig_file" { type = string default = "" description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false } variable "kubeconfig_context" { type = string default = "" - description = "Context to choose from the Kubernetes kube config file" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false } variable "kube_data_auth_enabled" { @@ -42,6 +59,7 @@ variable "kube_data_auth_enabled" { If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_enabled" { @@ -51,48 +69,62 @@ variable "kube_exec_auth_enabled" { If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_role_arn" { type = string default = "" description = "The role ARN for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_role_arn_enabled" { type = bool default = true description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false } variable "kube_exec_auth_aws_profile" { type = string default = "" description = "The AWS config profile for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_aws_profile_enabled" { type = bool default = false description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false } variable "kubeconfig_exec_auth_api_version" { type = string default = "client.authentication.k8s.io/v1beta1" description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false } variable "helm_manifest_experiment_enabled" { type = bool - default = true + default = false description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false } locals { kubeconfig_file_enabled = var.kubeconfig_file_enabled - kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled - kube_data_auth_enabled = local.kube_exec_auth_enabled ? 
false : var.kube_data_auth_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled # Eventually we might try to get this from an environment variable kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version @@ -101,16 +133,17 @@ locals { "--profile", var.kube_exec_auth_aws_profile ] : [] - kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, var.import_role_arn, module.iam_roles.terraform_role_arn) + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ "--role-arn", local.kube_exec_auth_role_arn ] : [] # Provide dummy configuration for the case where the EKS cluster is not available. - certificate_authority_data = try(module.eks.outputs.eks_cluster_certificate_authority_data, "") + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") - eks_cluster_endpoint = try(module.eks.outputs.eks_cluster_endpoint, "") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") } data "aws_eks_cluster_auth" "eks" { @@ -121,15 +154,16 @@ data "aws_eks_cluster_auth" "eks" { provider "helm" { kubernetes { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) + cluster_ca_certificate = local.cluster_ca_certificate token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? 
["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" @@ -146,15 +180,16 @@ provider "helm" { provider "kubernetes" { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) + cluster_ca_certificate = local.cluster_ca_certificate token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" diff --git a/modules/eks/actions-runner-controller/providers.tf b/modules/eks/actions-runner-controller/providers.tf index 80d153743..89ed50a98 100644 --- a/modules/eks/actions-runner-controller/providers.tf +++ b/modules/eks/actions-runner-controller/providers.tf @@ -1,11 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -14,16 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} - diff --git a/modules/eks/actions-runner-controller/remote-state.tf b/modules/eks/actions-runner-controller/remote-state.tf index 90c6ab1a8..c1ec8226d 100644 --- a/modules/eks/actions-runner-controller/remote-state.tf +++ b/modules/eks/actions-runner-controller/remote-state.tf @@ -1,6 +1,6 @@ module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" + version = "1.5.0" component = var.eks_component_name diff --git a/modules/eks/actions-runner-controller/resources/values.yaml b/modules/eks/actions-runner-controller/resources/values.yaml index c48e9bbcb..f4a9db43d 100644 --- a/modules/eks/actions-runner-controller/resources/values.yaml +++ b/modules/eks/actions-runner-controller/resources/values.yaml @@ -1,35 +1,34 @@ authSecret: create: false - name: controller-manager -replicaCount: 1 + # Use default name, or set via var.existing_kubernetes_secret_name scope: # If true, the controller will only watch custom resources in a single namespace, # which by default is the namespace the controller is in. # This provides the ability to run multiple controllers in different namespaces # with different TOKENS to get around GitHub API rate limits, among other things. singleNamespace: true -syncPeriod: 120s +# syncPeriod sets the period in which the controller reconciles the desired runners count. +# The default value is 60 seconds. +# syncPeriod: 120s githubWebhookServer: enabled: false - syncPeriod: 120s secret: # Webhook secret, used to authenticate incoming webhook events from GitHub # When using Sops, stored in same SopsSecret as authSecret under key `github_webhook_secret_token` + # with name set via var.existing_kubernetes_secret_name. Otherwise, use default name. enabled: true create: false - name: "controller-manager" useRunnerGroupsVisibility: false ingress: enabled: false annotations: alb.ingress.kubernetes.io/backend-protocol: HTTP - alb.ingress.kubernetes.io/group.name: common + # Use the default ingress, or uncomment and set the group name to use a different one + # alb.ingress.kubernetes.io/group.name: common alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80},{"HTTPS":443}]' - alb.ingress.kubernetes.io/load-balancer-name: k8s-common alb.ingress.kubernetes.io/scheme: internet-facing alb.ingress.kubernetes.io/ssl-redirect: '443' alb.ingress.kubernetes.io/target-type: ip - kubernetes.io/ingress.class: alb podDisruptionBudget: maxUnavailable: "60%" diff --git a/modules/eks/actions-runner-controller/variables.tf b/modules/eks/actions-runner-controller/variables.tf index 50bef3f50..a9c7f16c4 100644 --- a/modules/eks/actions-runner-controller/variables.tf +++ b/modules/eks/actions-runner-controller/variables.tf @@ -25,6 +25,12 @@ variable "chart_version" { default = null } +variable "controller_replica_count" { + type = number + description = "The number of replicas of the runner-controller to run." 
+ default = 2 +} + variable "resources" { type = object({ limits = object({ @@ -86,33 +92,6 @@ variable "rbac_enabled" { description = "Service Account for pods." } -# Runner-specific settings - -/* -variable "account_map_environment_name" { - type = string - description = "The name of the environment where `account_map` is provisioned" - default = "gbl" -} - -variable "account_map_stage_name" { - type = string - description = "The name of the stage where `account_map` is provisioned" - default = "root" -} - -variable "account_map_tenant_name" { - type = string - description = <<-EOT - The name of the tenant where `account_map` is provisioned. - - If the `tenant` label is not used, leave this as `null`. - EOT - default = "core" -} - -*/ - variable "existing_kubernetes_secret_name" { type = string description = <<-EOT @@ -131,6 +110,18 @@ variable "s3_bucket_arns" { default = [] } +variable "docker_config_json_enabled" { + type = bool + description = "Whether the Docker config JSON is enabled" + default = false +} + +variable "ssm_docker_config_json_path" { + type = string + description = "SSM path to the Docker config JSON" + default = null +} + variable "runners" { description = <<-EOT Map of Action Runner configurations, with the key being the name of the runner. Please note that the name must be in @@ -141,18 +132,13 @@ variable "runners" { ```hcl organization_runner = { type = "organization" # can be either 'organization' or 'repository' - dind_enabled: false # A Docker sidecar container will be deployed - image: summerwind/actions-runner # If dind_enabled=true, set this to 'summerwind/actions-runner-dind' + dind_enabled: true # A Docker daemon will be started in the runner Pod + image: summerwind/actions-runner-dind # If dind_enabled=false, set this to 'summerwind/actions-runner' scope = "ACME" # org name for Organization runners, repo name for Repository runners + group = "core-automation" # Optional. Assigns the runners to a runner group, for access control. scale_down_delay_seconds = 300 min_replicas = 1 max_replicas = 5 - busy_metrics = { - scale_up_threshold = 0.75 - scale_down_threshold = 0.25 - scale_up_factor = 2 - scale_down_factor = 0.5 - } labels = [ "Ubuntu", "core-automation", @@ -162,13 +148,44 @@ variable "runners" { EOT type = map(object({ - type = string - scope = string - image = optional(string, "") - dind_enabled = bool - scale_down_delay_seconds = number + type = string + scope = string + group = optional(string, null) + image = optional(string, "summerwind/actions-runner-dind") + auto_update_enabled = optional(bool, true) + dind_enabled = optional(bool, true) + node_selector = optional(map(string), {}) + pod_annotations = optional(map(string), {}) + + # running_pod_annotations are only applied to the pods once they start running a job + running_pod_annotations = optional(map(string), {}) + + # affinity is too complex to model. Whatever you assigned affinity will be copied + # to the runner Pod spec. + affinity = optional(any) + + tolerations = optional(list(object({ + key = string + operator = string + value = optional(string, null) + effect = string + })), []) + scale_down_delay_seconds = optional(number, 300) min_replicas = number max_replicas = number + # Scheduled overrides. See https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#scheduled-overrides + # Order is important. The earlier entry is prioritized higher than later entries. 
So you usually define + # one-time overrides at the top of your list, then yearly, monthly, weekly, and lastly daily overrides. + scheduled_overrides = optional(list(object({ + start_time = string # ISO 8601 format, eg, "2021-06-01T00:00:00+09:00" + end_time = string # ISO 8601 format, eg, "2021-06-01T00:00:00+09:00" + min_replicas = optional(number) + max_replicas = optional(number) + recurrence_rule = optional(object({ + frequency = string # One of Daily, Weekly, Monthly, Yearly + until_time = optional(string) # ISO 8601 format time after which the schedule will no longer apply + })) + })), []) busy_metrics = optional(object({ scale_up_threshold = string scale_down_threshold = string @@ -177,21 +194,53 @@ variable "runners" { scale_up_factor = optional(string) scale_down_factor = optional(string) })) - webhook_driven_scaling_enabled = bool - pull_driven_scaling_enabled = bool - labels = list(string) - storage = optional(string, "") - resources = object({ - limits = object({ - cpu = string - memory = string - ephemeral_storage = optional(string, "") - }) - requests = object({ - cpu = string - memory = string - }) - }) + webhook_driven_scaling_enabled = optional(bool, true) + # max_duration is the duration after which a job will be considered completed, + # even if the webhook has not received a "job completed" event. + # This is to ensure that if an event is missed, it does not leave the runner running forever. + # Set it long enough to cover the longest job you expect to run and then some. + # See https://github.com/actions/actions-runner-controller/blob/9afd93065fa8b1f87296f0dcdf0c2753a0548cb7/docs/automatically-scaling-runners.md?plain=1#L264-L268 + # Defaults to 1 hour programmatically (to be able to detect if both max_duration and webhook_startup_timeout are set). + max_duration = optional(string) + # The name `webhook_startup_timeout` was misleading and has been deprecated. + # It has been renamed `max_duration`. + webhook_startup_timeout = optional(string) + # Adjust the time (in seconds) to wait for the Docker in Docker daemon to become responsive. + wait_for_docker_seconds = optional(string, "") + pull_driven_scaling_enabled = optional(bool, false) + labels = optional(list(string), []) + # If not null, `docker_storage` specifies the size (as `go` string) of + # an ephemeral (default storage class) Persistent Volume to allocate for the Docker daemon. + # Takes precedence over `tmpfs_enabled` for the Docker daemon storage. + docker_storage = optional(string, null) + # storage is deprecated in favor of docker_storage, since it is only storage for the Docker daemon + storage = optional(string, null) + # If `pvc_enabled` is true, a Persistent Volume Claim will be created for the runner + # and mounted at /home/runner/work/shared. This is useful for sharing data between runners. + pvc_enabled = optional(bool, false) + # If `tmpfs_enabled` is `true`, both the runner and the docker daemon will use a tmpfs volume, + # meaning that all data will be stored in RAM rather than on disk, bypassing disk I/O limitations, + # but what would have been disk usage is now additional memory usage. You must specify memory + # requests and limits when using tmpfs or else the Pod will likely crash the Node. + tmpfs_enabled = optional(bool) + resources = optional(object({ + limits = optional(object({ + cpu = optional(string, "1") + memory = optional(string, "1Gi") + # ephemeral-storage is the Kubernetes name, but `ephemeral_storage` is the gomplate name, + # so allow either. 
If both are specified, `ephemeral-storage` takes precedence. + ephemeral-storage = optional(string) + ephemeral_storage = optional(string, "10Gi") + }), {}) + requests = optional(object({ + cpu = optional(string, "500m") + memory = optional(string, "256Mi") + # ephemeral-storage is the Kubernetes name, but `ephemeral_storage` is the gomplate name, + # so allow either. If both are specified, `ephemeral-storage` takes precedence. + ephemeral-storage = optional(string) + ephemeral_storage = optional(string, "1Gi") + }), {}) + }), {}) })) } @@ -199,15 +248,19 @@ variable "webhook" { type = object({ enabled = bool hostname_template = string + queue_limit = optional(number, 1000) }) description = <<-EOT Configuration for the GitHub Webhook Server. `hostname_template` is the `format()` string to use to generate the hostname via `format(var.hostname_template, var.tenant, var.stage, var.environment)`" Typically something like `"echo.%[3]v.%[2]v.example.com"`. + `queue_limit` is the maximum number of webhook events that can be queued up for processing by the autoscaler. + When the queue gets full, webhook events will be dropped (status 500). EOT default = { enabled = false hostname_template = null + queue_limit = 1000 } } @@ -240,3 +293,9 @@ variable "ssm_github_webhook_secret_token_path" { description = "The path in SSM to the GitHub Webhook Secret token." default = "" } + +variable "context_tags_enabled" { + type = bool + description = "Whether or not to include all context tags as labels for each runner" + default = false +} diff --git a/modules/eks/actions-runner-controller/versions.tf b/modules/eks/actions-runner-controller/versions.tf index 482ed7ae8..f4e52c7b2 100644 --- a/modules/eks/actions-runner-controller/versions.tf +++ b/modules/eks/actions-runner-controller/versions.tf @@ -12,7 +12,7 @@ terraform { } kubernetes = { source = "hashicorp/kubernetes" - version = ">= 2.0" + version = ">= 2.0, != 2.21.0" } } } diff --git a/modules/eks/alb-controller-ingress-class/README.md b/modules/eks/alb-controller-ingress-class/README.md index af925ccee..d7856b6d2 100644 --- a/modules/eks/alb-controller-ingress-class/README.md +++ b/modules/eks/alb-controller-ingress-class/README.md @@ -1,10 +1,18 @@ +--- +tags: + - component/eks/alb-controller-ingress-class + - layer/eks + - provider/aws + - provider/helm +--- + # Component: `eks/alb-controller-ingress-class` -This component deploys a Kubernetes `IngressClass` resource for the AWS Load Balancer Controller. -This is not often needed, as the default IngressClass deployed by the `eks/alb-controller` component -is sufficient for most use cases, and when it is not, a service can deploy its own IngressClass. -This is for the rare case where you want to deploy an additional IngressClass deploying an additional -ALB that you nevertheless want to be shared by some services, with none of them explicitly owning it. +This component deploys a Kubernetes `IngressClass` resource for the AWS Load Balancer Controller. This is not often +needed, as the default IngressClass deployed by the `eks/alb-controller` component is sufficient for most use cases, and +when it is not, a service can deploy its own IngressClass. This is for the rare case where you want to deploy an +additional IngressClass deploying an additional ALB that you nevertheless want to be shared by some services, with none +of them explicitly owning it. 
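As a rough sketch of how such a shared class would be consumed (the class and service names below are hypothetical, not created by this component), a workload simply points its Ingress at the extra class via `ingressClassName`; every Ingress that does so lands on the ALB managed by that class's IngressGroup, without any single service owning the load balancer:

```yaml
# Hypothetical Ingress in a service's own manifests.
# "shared-alb" stands in for whatever name this component's IngressClass was given.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: echo-server
  namespace: echo
spec:
  ingressClassName: shared-alb
  rules:
    - host: echo.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: echo-server
                port:
                  number: 80
```

Because the group, scheme, and IP address type are carried by the IngressClass itself, individual services generally do not need to repeat the `alb.ingress.kubernetes.io/*` annotations to end up on the shared load balancer.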
## Usage @@ -21,6 +29,7 @@ components: scheme: internet-facing ``` + ## Requirements @@ -29,20 +38,20 @@ components: | [terraform](#requirement\_terraform) | >= 1.0.0 | | [aws](#requirement\_aws) | >= 4.9.0 | | [helm](#requirement\_helm) | >= 2.0 | -| [kubernetes](#requirement\_kubernetes) | >= 2.14.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.14.0, != 2.21.0 | ## Providers | Name | Version | |------|---------| | [aws](#provider\_aws) | >= 4.9.0 | -| [kubernetes](#provider\_kubernetes) | >= 2.14.0 | +| [kubernetes](#provider\_kubernetes) | >= 2.14.0, != 2.21.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.3.1 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -69,10 +78,8 @@ components: | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [group](#input\_group) | Group name for default ingress | `string` | `"common"` | no | -| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `true` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [ip\_address\_type](#input\_ip\_address\_type) | IP address type for default ingress, one of `ipv4` or `dualstack`. | `string` | `"dualstack"` | no | | [is\_default](#input\_is\_default) | Set `true` to make this the default IngressClass. There should only be one default per cluster. | `bool` | `false` | no | | [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | @@ -81,7 +88,8 @@ components: | [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | | [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | -| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | | [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | | [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | | [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | @@ -103,6 +111,7 @@ components: No outputs. + ## References diff --git a/modules/eks/alb-controller-ingress-class/provider-helm.tf b/modules/eks/alb-controller-ingress-class/provider-helm.tf index 9bb5edb6f..91cc7f6d4 100644 --- a/modules/eks/alb-controller-ingress-class/provider-helm.tf +++ b/modules/eks/alb-controller-ingress-class/provider-helm.tf @@ -21,18 +21,35 @@ variable "kubeconfig_file_enabled" { type = bool default = false description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false } variable "kubeconfig_file" { type = string default = "" description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false } variable "kubeconfig_context" { type = string default = "" - description = "Context to choose from the Kubernetes kube config file" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false } variable "kube_data_auth_enabled" { @@ -42,6 +59,7 @@ variable "kube_data_auth_enabled" { If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_enabled" { @@ -51,48 +69,62 @@ variable "kube_exec_auth_enabled" { If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. 
EOT + nullable = false } variable "kube_exec_auth_role_arn" { type = string default = "" description = "The role ARN for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_role_arn_enabled" { type = bool default = true description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false } variable "kube_exec_auth_aws_profile" { type = string default = "" description = "The AWS config profile for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_aws_profile_enabled" { type = bool default = false description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false } variable "kubeconfig_exec_auth_api_version" { type = string default = "client.authentication.k8s.io/v1beta1" description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false } variable "helm_manifest_experiment_enabled" { type = bool - default = true + default = false description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false } locals { kubeconfig_file_enabled = var.kubeconfig_file_enabled - kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled - kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled # Eventually we might try to get this from an environment variable kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version @@ -101,16 +133,17 @@ locals { "--profile", var.kube_exec_auth_aws_profile ] : [] - kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, var.import_role_arn, module.iam_roles.terraform_role_arn) + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ "--role-arn", local.kube_exec_auth_role_arn ] : [] # Provide dummy configuration for the case where the EKS cluster is not available. - certificate_authority_data = try(module.eks.outputs.eks_cluster_certificate_authority_data, "") + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") - eks_cluster_endpoint = try(module.eks.outputs.eks_cluster_endpoint, "") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? 
null : try(module.eks.outputs.eks_cluster_endpoint, "") } data "aws_eks_cluster_auth" "eks" { @@ -121,15 +154,16 @@ data "aws_eks_cluster_auth" "eks" { provider "helm" { kubernetes { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) + cluster_ca_certificate = local.cluster_ca_certificate token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" @@ -146,15 +180,16 @@ provider "helm" { provider "kubernetes" { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) + cluster_ca_certificate = local.cluster_ca_certificate token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" diff --git a/modules/eks/alb-controller-ingress-class/providers.tf b/modules/eks/alb-controller-ingress-class/providers.tf index c2419aabb..89ed50a98 100644 --- a/modules/eks/alb-controller-ingress-class/providers.tf +++ b/modules/eks/alb-controller-ingress-class/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? 
[] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/eks/alb-controller-ingress-class/remote-state.tf b/modules/eks/alb-controller-ingress-class/remote-state.tf index 90c6ab1a8..c1ec8226d 100644 --- a/modules/eks/alb-controller-ingress-class/remote-state.tf +++ b/modules/eks/alb-controller-ingress-class/remote-state.tf @@ -1,6 +1,6 @@ module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" + version = "1.5.0" component = var.eks_component_name diff --git a/modules/eks/alb-controller-ingress-class/versions.tf b/modules/eks/alb-controller-ingress-class/versions.tf index 45b29866a..48fd8c954 100644 --- a/modules/eks/alb-controller-ingress-class/versions.tf +++ b/modules/eks/alb-controller-ingress-class/versions.tf @@ -12,7 +12,7 @@ terraform { } kubernetes = { source = "hashicorp/kubernetes" - version = ">= 2.14.0" + version = ">= 2.14.0, != 2.21.0" } } } diff --git a/modules/eks/alb-controller-ingress-group/README.md b/modules/eks/alb-controller-ingress-group/README.md index 58d861226..cee06ff06 100644 --- a/modules/eks/alb-controller-ingress-group/README.md +++ b/modules/eks/alb-controller-ingress-group/README.md @@ -1,8 +1,17 @@ -# alb-controller-ingress-group +--- +tags: + - component/eks/alb-controller-ingress-group + - layer/eks + - provider/aws + - provider/helm +--- -This component creates a Kubernetes Service that creates an ALB for a specific [IngressGroup]. +# Component: `eks/alb-controller-ingress-group` -An [IngressGroup] is a feature of the [alb-controller] which allows multiple Kubernetes Ingresses to share the same Application Load Balancer. +This component provisions a Kubernetes Service that creates an ALB for a specific [IngressGroup]. + +An [IngressGroup] is a feature of the [alb-controller] which allows multiple Kubernetes Ingresses to share the same +Application Load Balancer. ## Usage @@ -15,12 +24,15 @@ import: - catalog/eks/alb-controller-ingress-group ``` -The default catalog values `e.g. stacks/catalog/eks/alb-controller-ingress-group.yaml` will create a Kubernetes Service in the `default` namespace with an [IngressGroup] named `alb-controller-ingress-group`. +The default catalog values `e.g. stacks/catalog/eks/alb-controller-ingress-group.yaml` will create a Kubernetes Service +in the `default` namespace with an [IngressGroup] named `alb-controller-ingress-group`. 
```yaml components: terraform: eks/alb-controller-ingress-group: + metadata: + component: eks/alb-controller-ingress-group settings: spacelift: workspace_enabled: true @@ -28,38 +40,36 @@ components: enabled: true # change the name of the Ingress Group name: alb-controller-ingress-group - # if this is not set, the expectation is that account-map - # is deployed within the same tenant - root_account_tenant_name: core ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | -| [kubernetes](#requirement\_kubernetes) | ~> 2.12.1 | +| [aws](#requirement\_aws) | >= 4.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.7.1, != 2.21.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | -| [kubernetes](#provider\_kubernetes) | ~> 2.12.1 | +| [aws](#provider\_aws) | >= 4.0 | +| [kubernetes](#provider\_kubernetes) | >= 2.7.1, != 2.21.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [dns\_delegated](#module\_dns\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | -| [global\_accelerator](#module\_global\_accelerator) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [dns\_delegated](#module\_dns\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [global\_accelerator](#module\_global\_accelerator) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | | [load\_balancer\_name](#module\_load\_balancer\_name) | cloudposse/label/null | 0.25.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [waf](#module\_waf) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [waf](#module\_waf) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -81,24 +91,24 @@ components: | [alb\_access\_logs\_enabled](#input\_alb\_access\_logs\_enabled) | Whether or not to enable access logs for the ALB | `bool` | `false` | no | | [alb\_access\_logs\_s3\_bucket\_name](#input\_alb\_access\_logs\_s3\_bucket\_name) | The name of the S3 bucket to store the access logs in | `string` | `null` | no | | [alb\_access\_logs\_s3\_bucket\_prefix](#input\_alb\_access\_logs\_s3\_bucket\_prefix) | The prefix to use when storing the access logs | `string` | `"echo-server"` | no | +| [alb\_group\_name](#input\_alb\_group\_name) | The name of the alb group | `string` | `null` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [create\_namespace](#input\_create\_namespace) | Create the namespace if it does not yet exist. Defaults to `false`. | `bool` | `false` | no | -| [default\_annotations](#input\_default\_annotations) | Default annotations to add to the Kubernetes ingress | `map(any)` |
{
"alb.ingress.kubernetes.io/listen-ports": "[{\"HTTP\": 80}, {\"HTTPS\": 443}]",
"alb.ingress.kubernetes.io/scheme": "internet-facing",
"alb.ingress.kubernetes.io/target-type": "ip",
"kubernetes.io/ingress.class": "alb"
}
| no | +| [default\_annotations](#input\_default\_annotations) | Default annotations to add to the Kubernetes ingress | `map(any)` |
{
"alb.ingress.kubernetes.io/listen-ports": "[{\"HTTP\": 80}, {\"HTTPS\": 443}]",
"alb.ingress.kubernetes.io/scheme": "internet-facing",
"alb.ingress.kubernetes.io/ssl-policy": "ELBSecurityPolicy-TLS13-1-2-2021-06",
"alb.ingress.kubernetes.io/target-type": "ip",
"kubernetes.io/ingress.class": "alb"
}
| no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to the `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [dns\_delegated\_component\_name](#input\_dns\_delegated\_component\_name) | The name of the `dns_delegated` component | `string` | `"dns-delegated"` | no | | [dns\_delegated\_environment\_name](#input\_dns\_delegated\_environment\_name) | Global environment name | `string` | `"gbl"` | no | -| [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | +| [eks\_component\_name](#input\_eks\_component\_name) | The name of the `eks` component | `string` | `"eks/cluster"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [fixed\_response\_config](#input\_fixed\_response\_config) | Configuration to overwrite the defaults such as `contentType`, `statusCode`, and `messageBody` | `map(any)` | `{}` | no | | [fixed\_response\_template](#input\_fixed\_response\_template) | Fixed response template to service as a default backend | `string` | `"resources/default-backend.html.tpl"` | no | | [fixed\_response\_vars](#input\_fixed\_response\_vars) | The templatefile vars to use for the fixed response template | `map(any)` |
{
"email": "hello@cloudposse.com"
}
| no | +| [global\_accelerator\_component\_name](#input\_global\_accelerator\_component\_name) | The name of the `global_accelerator` component | `string` | `"global-accelerator"` | no | | [global\_accelerator\_enabled](#input\_global\_accelerator\_enabled) | Whether or not Global Accelerator Endpoint Group should be provisioned for the load balancer | `bool` | `false` | no | -| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `true` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | | [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | @@ -124,6 +134,7 @@ components: | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [waf\_component\_name](#input\_waf\_component\_name) | The name of the `waf` component | `string` | `"waf"` | no | | [waf\_enabled](#input\_waf\_enabled) | Whether or not WAF ACL annotation should be provisioned for the load balancer | `bool` | `false` | no | ## Outputs @@ -133,15 +144,20 @@ components: | [annotations](#output\_annotations) | The annotations of the Ingress | | [group\_name](#output\_group\_name) | The value of `alb.ingress.kubernetes.io/group.name` of the Ingress | | [host](#output\_host) | The name of the host used by the Ingress | +| [ingress\_class](#output\_ingress\_class) | The value of the `kubernetes.io/ingress.class` annotation of the Kubernetes Ingress | | [load\_balancer\_name](#output\_load\_balancer\_name) | The name of the load balancer created by the Ingress | +| [load\_balancer\_scheme](#output\_load\_balancer\_scheme) | The value of the `alb.ingress.kubernetes.io/scheme` annotation of the Kubernetes Ingress | | [message\_body\_length](#output\_message\_body\_length) | The length of the message body to ensure it's lower than the maximum limit | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/eks/alb-controller-ingress-group) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/eks/alb-controller-ingress-group) - + Cloud Posse's upstream component [](https://cpco.io/component) -[IngressGroup]: -[alb-controller]: +[ingressgroup]: + https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/ingress/annotations/#ingressgroup +[alb-controller]: https://github.com/kubernetes-sigs/aws-load-balancer-controller diff --git a/modules/eks/alb-controller-ingress-group/default.auto.tfvars b/modules/eks/alb-controller-ingress-group/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/eks/alb-controller-ingress-group/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/eks/alb-controller-ingress-group/main.tf b/modules/eks/alb-controller-ingress-group/main.tf index 8d6e4f1ea..205c0e8f8 100644 --- a/modules/eks/alb-controller-ingress-group/main.tf +++ b/modules/eks/alb-controller-ingress-group/main.tf @@ -22,7 +22,7 @@ locals { global_accelerator.outputs.listener_ids[0] ] - ingress_controller_group_name = module.this.name + ingress_controller_group_name = coalesce(var.alb_group_name, module.this.name) kube_tags = join(",", [for k, v in module.this.tags : "${k}=${v}"]) @@ -33,7 +33,9 @@ locals { # for outputs annotations = try(kubernetes_ingress_v1.default[0].metadata.0.annotations, null) group_name_annotation = try(lookup(kubernetes_ingress_v1.default[0].metadata.0.annotations, "alb.ingress.kubernetes.io/group.name", null), null) - load_balancer_name = join("", data.aws_lb.default[*].name) + scheme_annotation = try(lookup(kubernetes_ingress_v1.default[0].metadata.0.annotations, "alb.ingress.kubernetes.io/scheme", null), null) + class_annotation = try(lookup(kubernetes_ingress_v1.default[0].metadata.0.annotations, "kubernetes.io/ingress.class", null), null) + load_balancer_name 
= one(data.aws_lb.default[*].name) host = join(".", [module.this.environment, module.dns_delegated.outputs.default_domain_name]) } @@ -89,6 +91,7 @@ resource "kubernetes_ingress_v1" "default" { labels = {} name = module.this.id namespace = local.kubernetes_namespace + annotations = merge( local.waf_acl_arn, local.alb_logging_annotation, @@ -171,7 +174,7 @@ data "aws_lb" "default" { tags = { "ingress.k8s.aws/resource" = "LoadBalancer" - "ingress.k8s.aws/stack" = var.name + "ingress.k8s.aws/stack" = local.ingress_controller_group_name "elbv2.k8s.aws/cluster" = module.eks.outputs.eks_cluster_id } diff --git a/modules/eks/alb-controller-ingress-group/outputs.tf b/modules/eks/alb-controller-ingress-group/outputs.tf index 8926b3a80..5a27ced83 100644 --- a/modules/eks/alb-controller-ingress-group/outputs.tf +++ b/modules/eks/alb-controller-ingress-group/outputs.tf @@ -22,3 +22,15 @@ output "message_body_length" { description = "The length of the message body to ensure it's lower than the maximum limit" value = length(local.message_body) } + +# https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/ingress/annotations/ +output "load_balancer_scheme" { + description = "The value of the `alb.ingress.kubernetes.io/scheme` annotation of the Kubernetes Ingress" + value = local.scheme_annotation +} + +# https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/ingress/annotations/ +output "ingress_class" { + description = "The value of the `kubernetes.io/ingress.class` annotation of the Kubernetes Ingress" + value = local.class_annotation +} diff --git a/modules/eks/alb-controller-ingress-group/provider-kubernetes.tf b/modules/eks/alb-controller-ingress-group/provider-kubernetes.tf index 00cfd1542..d26650e31 100644 --- a/modules/eks/alb-controller-ingress-group/provider-kubernetes.tf +++ b/modules/eks/alb-controller-ingress-group/provider-kubernetes.tf @@ -77,12 +77,6 @@ variable "kubeconfig_exec_auth_api_version" { description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" } -variable "helm_manifest_experiment_enabled" { - type = bool - default = true - description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" -} - locals { kubeconfig_file_enabled = var.kubeconfig_file_enabled kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled @@ -95,7 +89,7 @@ locals { "--profile", var.kube_exec_auth_aws_profile ] : [] - kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, var.import_role_arn, module.iam_roles.terraform_role_arn) + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ "--role-arn", local.kube_exec_auth_role_arn ] : [] diff --git a/modules/eks/alb-controller-ingress-group/providers.tf b/modules/eks/alb-controller-ingress-group/providers.tf index c2419aabb..89ed50a98 100644 --- a/modules/eks/alb-controller-ingress-group/providers.tf +++ b/modules/eks/alb-controller-ingress-group/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. 
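# Note: compact() drops null entries, so the dynamic "assume_role" block below is generated at most
# once, and not at all when no Terraform role ARN is available.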
+ profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/eks/alb-controller-ingress-group/remote-state.tf b/modules/eks/alb-controller-ingress-group/remote-state.tf index 8511e7f56..138570d00 100644 --- a/modules/eks/alb-controller-ingress-group/remote-state.tf +++ b/modules/eks/alb-controller-ingress-group/remote-state.tf @@ -1,8 +1,8 @@ module "dns_delegated" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" - component = "dns-delegated" + component = var.dns_delegated_component_name environment = var.dns_delegated_environment_name context = module.this.context @@ -10,7 +10,7 @@ module "dns_delegated" { module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = var.eks_component_name @@ -19,11 +19,11 @@ module "eks" { module "global_accelerator" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" for_each = local.global_accelerator_enabled ? toset(["true"]) : [] - component = "global-accelerator" + component = var.global_accelerator_component_name environment = "gbl" context = module.this.context @@ -31,11 +31,11 @@ module "global_accelerator" { module "waf" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" for_each = local.waf_enabled ? 
toset(["true"]) : [] - component = "waf" + component = var.waf_component_name context = module.this.context } diff --git a/modules/eks/alb-controller-ingress-group/variables.tf b/modules/eks/alb-controller-ingress-group/variables.tf index b984c0625..70a48527d 100644 --- a/modules/eks/alb-controller-ingress-group/variables.tf +++ b/modules/eks/alb-controller-ingress-group/variables.tf @@ -29,19 +29,38 @@ variable "default_annotations" { "alb.ingress.kubernetes.io/target-type" = "ip" "kubernetes.io/ingress.class" = "alb" "alb.ingress.kubernetes.io/listen-ports" = "[{\"HTTP\": 80}, {\"HTTPS\": 443}]" + "alb.ingress.kubernetes.io/ssl-policy" = "ELBSecurityPolicy-TLS13-1-2-2021-06" } } -variable "dns_delegated_environment_name" { +variable "eks_component_name" { type = string - description = "Global environment name" - default = "gbl" + description = "The name of the `eks` component" + default = "eks/cluster" } -variable "eks_component_name" { +variable "global_accelerator_component_name" { type = string - description = "The name of the eks component" - default = "eks/cluster" + description = "The name of the `global_accelerator` component" + default = "global-accelerator" +} + +variable "dns_delegated_component_name" { + type = string + description = "The name of the `dns_delegated` component" + default = "dns-delegated" +} + +variable "waf_component_name" { + type = string + description = "The name of the `waf` component" + default = "waf" +} + +variable "dns_delegated_environment_name" { + type = string + description = "Global environment name" + default = "gbl" } variable "global_accelerator_enabled" { @@ -116,3 +135,9 @@ variable "fixed_response_vars" { email = "hello@cloudposse.com" } } + +variable "alb_group_name" { + type = string + description = "The name of the alb group" + default = null +} diff --git a/modules/eks/alb-controller-ingress-group/versions.tf b/modules/eks/alb-controller-ingress-group/versions.tf index e28af9b9a..5e4bbc1d4 100644 --- a/modules/eks/alb-controller-ingress-group/versions.tf +++ b/modules/eks/alb-controller-ingress-group/versions.tf @@ -4,11 +4,11 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } kubernetes = { source = "hashicorp/kubernetes" - version = "~> 2.12.1" + version = ">= 2.7.1, != 2.21.0" } } } diff --git a/modules/eks/alb-controller/CHANGELOG.md b/modules/eks/alb-controller/CHANGELOG.md new file mode 100644 index 000000000..638dd2e73 --- /dev/null +++ b/modules/eks/alb-controller/CHANGELOG.md @@ -0,0 +1,35 @@ +## Release 1.466.0 + +PR [#1070](https://github.com/cloudposse/terraform-aws-components/pull/1070) + +Change default for `default_ingress_ip_address_type` from `dualstack` to `ipv4`. When `dualstack` is configured, the +Ingress will fail if the VPC does not have an IPv6 CIDR block, which is still a common case. When `ipv4` is configured, +the Ingress will work with only an IPv4 CIDR block, and simply will not use IPv6 if it exists. This makes `ipv4` the +more conservative default. + +## Release 1.432.0 + +Better support for Kubeconfig authentication + +## Release 1.289.1 + +PR [#821](https://github.com/cloudposse/terraform-aws-components/pull/821) + +### Update IAM Policy and Change How it is Managed + +The ALB controller needs a lot of permissions and has a complex IAM policy. For this reason, the project releases a +complete JSON policy document that is updated as needed. + +In this release: + +1. 
We have updated the policy to the one distributed with version 2.6.0 of the ALB controller. This fixes an issue where + the controller was not able to create the service-linked role for the Elastic Load Balancing service. +2. To ease maintenance, we have moved the policy document to a separate file, `distributed-iam-policy.tf` and made it + easy to update or override. + +#### Gov Cloud and China Regions + +Actually, the project releases 3 policy documents, one for each of the three AWS partitions: `aws`, `aws-cn`, and +`aws-us-gov`. For simplicity, this module only uses the `aws` partition policy. If you are in another partition, you can +create a `distributed-iam-policy_override.tf` file in your directory and override the +`overridable_distributed_iam_policy` local variable with the policy document for your partition. diff --git a/modules/eks/alb-controller/README.md b/modules/eks/alb-controller/README.md index 189e0d045..ccf4e616b 100644 --- a/modules/eks/alb-controller/README.md +++ b/modules/eks/alb-controller/README.md @@ -1,10 +1,27 @@ +--- +tags: + - component/eks/alb-controller + - layer/eks + - provider/aws + - provider/helm +--- + # Component: `eks/alb-controller` -This component creates a Helm release for [alb-controller](https://github.com/kubernetes-sigs/aws-load-balancer-controller) on an EKS cluster. +This component creates a Helm release for +[alb-controller](https://github.com/kubernetes-sigs/aws-load-balancer-controller) on an EKS cluster. + +[alb-controller](https://github.com/kubernetes-sigs/aws-load-balancer-controller) is a Kubernetes addon that, in the +context of AWS, provisions and manages ALBs and NLBs based on Service and Ingress annotations. This module also can (and +is recommended to) provision a default IngressClass. + +### Special note about upgrading -[alb-controller](https://github.com/kubernetes-sigs/aws-load-balancer-controller) is a Kubernetes addon that, -in the context of AWS, provisions and manages ALBs and NLBs based on Service and Ingress annotations. -This module also can (and is recommended to) provision a default IngressClass. +When upgrading the chart version, check to see if the IAM policy for the service account needs to be updated. If it +does, update the policy in the `distributed-iam-policy.tf` file. Probably the easiest way to check if it needs updating +is to simply download the policy from +https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/main/docs/install/iam_policy.json and +compare it to the policy in `distributed-iam-policy.tf`. ## Usage @@ -27,7 +44,9 @@ components: vars: chart: aws-load-balancer-controller chart_repository: https://aws.github.io/eks-charts - chart_version: "1.4.5" + # IMPORTANT: When updating the chart version, check to see if the IAM policy for the service account. + # needs to be updated, and if it does, update the policy in the `distributed-iam-policy.tf` file. 
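# As one illustrative way to check (not from the upstream docs), download the policy shipped with
# the controller version you are targeting and compare it against `distributed-iam-policy.tf`, e.g.:
#   curl -fsSL https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/main/docs/install/iam_policy.json -o /tmp/alb-iam-policy.json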
+ chart_version: "1.6.0" create_namespace: true kubernetes_namespace: alb-controller # this feature causes inconsistent final plans @@ -47,6 +66,7 @@ components: chart_values: {} ``` + ## Requirements @@ -55,7 +75,7 @@ components: | [terraform](#requirement\_terraform) | >= 1.0.0 | | [aws](#requirement\_aws) | >= 4.9.0 | | [helm](#requirement\_helm) | >= 2.0 | -| [kubernetes](#requirement\_kubernetes) | >= 2.14.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.14.0, != 2.21.0 | ## Providers @@ -67,8 +87,8 @@ components: | Name | Source | Version | |------|--------|---------| -| [alb\_controller](#module\_alb\_controller) | cloudposse/helm-release/aws | 0.7.0 | -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.3.1 | +| [alb\_controller](#module\_alb\_controller) | cloudposse/helm-release/aws | 0.10.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -97,7 +117,7 @@ components: | [default\_ingress\_class\_name](#input\_default\_ingress\_class\_name) | Class name for default ingress | `string` | `"default"` | no | | [default\_ingress\_enabled](#input\_default\_ingress\_enabled) | Set `true` to deploy a default IngressClass. There should only be one default per cluster. | `bool` | `true` | no | | [default\_ingress\_group](#input\_default\_ingress\_group) | Group name for default ingress | `string` | `"common"` | no | -| [default\_ingress\_ip\_address\_type](#input\_default\_ingress\_ip\_address\_type) | IP address type for default ingress, one of `ipv4` or `dualstack`. | `string` | `"dualstack"` | no | +| [default\_ingress\_ip\_address\_type](#input\_default\_ingress\_ip\_address\_type) | IP address type for default ingress, one of `ipv4` or `dualstack`. | `string` | `"ipv4"` | no | | [default\_ingress\_load\_balancer\_attributes](#input\_default\_ingress\_load\_balancer\_attributes) | A list of load balancer attributes to apply to the default ingress load balancer.
See [Load Balancer Attributes](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html#load-balancer-attributes). | `list(object({ key = string, value = string }))` | `[]` | no | | [default\_ingress\_scheme](#input\_default\_ingress\_scheme) | Scheme for default ingress, one of `internet-facing` or `internal`. | `string` | `"internet-facing"` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | @@ -105,17 +125,16 @@ components: | [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `true` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | | [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | | [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | | [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | -| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | | [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | | [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | | [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | @@ -134,7 +153,6 @@ components: | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | | [timeout](#input\_timeout) | Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds | `number` | `null` | no | -| [wait](#input\_wait) | Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`. | `bool` | `null` | no | ## Outputs @@ -142,6 +160,7 @@ components: |------|-------------| | [metadata](#output\_metadata) | Block status of the deployed release | + ## References diff --git a/modules/eks/alb-controller/distributed-iam-policy.tf b/modules/eks/alb-controller/distributed-iam-policy.tf new file mode 100644 index 000000000..fcc655182 --- /dev/null +++ b/modules/eks/alb-controller/distributed-iam-policy.tf @@ -0,0 +1,263 @@ + +# The kubernetes-sigs/aws-load-balancer-controller/ project distributes the +# AWS IAM policy that is required for the AWS Load Balancer Controller as a JSON +# download at https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v/docs/install/iam_policy.json +# See https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.6/deploy/installation/#option-a-recommended-iam-roles-for-service-accounts-irsa for details. + +# We could directly use the URL to download and install the policy at runtime, +# via the cloudposse/helm-release/aws module's ` iam_source_json_url` input, +# but that lacks transparency and auditability. It also does not give us a chance +# to make changes in response to bugs, such as +# https://github.com/kubernetes-sigs/aws-load-balancer-controller/issues/2692#issuecomment-1426242236 +# +# So we download the policy and insert it here as a local variable. + +locals { + # To update, just replace everything between the two "EOT"s with the contents of the downloaded JSON file. + # Below is the policy as of version 2.6.0, downloaded from + # https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.6.0/docs/install/iam_policy.json + # This policy is for the `aws` partition. Override overridable_distributed_iam_policy for other partitions. + overridable_distributed_iam_policy = < 0 ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" @@ -146,15 +180,16 @@ provider "helm" { provider "kubernetes" { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) + cluster_ca_certificate = local.cluster_ca_certificate token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. 
People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" diff --git a/modules/eks/alb-controller/providers.tf b/modules/eks/alb-controller/providers.tf index c2419aabb..89ed50a98 100644 --- a/modules/eks/alb-controller/providers.tf +++ b/modules/eks/alb-controller/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/eks/alb-controller/remote-state.tf b/modules/eks/alb-controller/remote-state.tf index 90c6ab1a8..c1ec8226d 100644 --- a/modules/eks/alb-controller/remote-state.tf +++ b/modules/eks/alb-controller/remote-state.tf @@ -1,6 +1,6 @@ module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" + version = "1.5.0" component = var.eks_component_name diff --git a/modules/eks/alb-controller/variables.tf b/modules/eks/alb-controller/variables.tf index 70e6a32b5..840969ea7 100644 --- a/modules/eks/alb-controller/variables.tf +++ b/modules/eks/alb-controller/variables.tf @@ -68,12 +68,6 @@ variable "atomic" { default = true } -variable "wait" { - type = bool - description = "Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`." - default = null -} - variable "chart_values" { type = any description = "Additional values to yamlencode as `helm_release` values." @@ -126,7 +120,7 @@ variable "default_ingress_scheme" { variable "default_ingress_ip_address_type" { type = string description = "IP address type for default ingress, one of `ipv4` or `dualstack`." 
- default = "dualstack" + default = "ipv4" validation { condition = contains(["ipv4", "dualstack"], var.default_ingress_ip_address_type) diff --git a/modules/eks/alb-controller/versions.tf b/modules/eks/alb-controller/versions.tf index 45b29866a..48fd8c954 100644 --- a/modules/eks/alb-controller/versions.tf +++ b/modules/eks/alb-controller/versions.tf @@ -12,7 +12,7 @@ terraform { } kubernetes = { source = "hashicorp/kubernetes" - version = ">= 2.14.0" + version = ">= 2.14.0, != 2.21.0" } } } diff --git a/modules/eks/argocd/CHANGELOG.md b/modules/eks/argocd/CHANGELOG.md new file mode 100644 index 000000000..e1c5a2d2d --- /dev/null +++ b/modules/eks/argocd/CHANGELOG.md @@ -0,0 +1,148 @@ +## Components PR [#905](https://github.com/cloudposse/terraform-aws-components/pull/905) + +The `notifications.tf` file has been renamed to `notifications.tf`. Delete `notifications.tf` after vendoring these +changes. + +## Components PR [#851](https://github.com/cloudposse/terraform-aws-components/pull/851) + +This is a bug fix and feature enhancement update. There are few actions necessary to upgrade. + +## Upgrade actions + +1. Update atmos stack yaml config + 1. Add `github_default_notifications_enabled: true` + 2. Add `github_webhook_enabled: true` + 3. Remove `notifications_triggers` + 4. Remove `notifications_templates` + 5. Remove `notifications_notifiers` + +```diff + components: + terraform: + argocd: + settings: + spacelift: + workspace_enabled: true + metadata: + component: eks/argocd + vars: ++ github_default_notifications_enabled: true ++ github_webhook_enabled: true +- notifications_triggers: +- trigger_on-deployed: +- - when: app.status.operationState.phase in ['Succeeded'] and app.status.health.status == 'Healthy' +- oncePer: app.status.sync.revision +- send: [app-deployed] +- notifications_templates: +- template_app-deployed: +- message: | +- Application {{.app.metadata.name}} is now running new version of deployments manifests. +- github: +- status: +- state: success +- label: "continuous-delivery/{{.app.metadata.name}}" +- targetURL: "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true" +- notifications_notifiers: +- service_github: +- appID: xxxxxxx +- installationID: xxxxxxx +``` + +2. Move secrets from `/argocd/notifications/notifiers/service_webhook_github-commit-status/github-token` to + `argocd/notifications/notifiers/common/github-token` + +```bash +chamber read -q argocd/notifications/notifiers/service_webhook_github-commit-status github-token | chamber write argocd/notifications/notifiers/common github-token +chamber delete argocd/notifications/notifiers/service_webhook_github-commit-status github-token +``` + +3. [Create GitHub PAT](https://docs.github.com/en/enterprise-server@3.6/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-personal-access-token) + with scope `admin:repo_hook` +4. Save the PAT to SSM `/argocd/github/api_key` + +```bash +chamber write argocd/github api_key ${PAT} +``` + +5. 
Apply changes with atmos + +## Features + +- [Git Webhook Configuration](https://argo-cd.readthedocs.io/en/stable/operator-manual/webhook/) - makes GitHub trigger + ArgoCD sync on each commit into argocd repo +- Replace + [GitHub notification service](https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/services/github/) + with predefined + [Webhook notification service](https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/services/webhook/) +- Added predefined GitHub commit status notifications for CD sync mode: + - `on-deploy-started` + - `app-repo-github-commit-status` + - `argocd-repo-github-commit-status` + - `on-deploy-succeded` + - `app-repo-github-commit-status` + - `argocd-repo-github-commit-status` + - `on-deploy-failed` + - `app-repo-github-commit-status` + - `argocd-repo-github-commit-status` +- Support SSM secrets (`/argocd/notifications/notifiers/common/*`) common for all notification services. (Can be + referenced with `$common_{secret-name}` ) + +### Bug Fixes + +- ArgoCD notifications pods recreated on deployment that change notifications related configs and secrets +- Remove `metadata` output that expose helm values configs (used in debug purpose) +- Remove legacy unnecessary helm values used in old ArgoCD versions (ex. `workflow auth` configs) and dropped + notifications services + +## Breaking changes + +- Removed `service_github` from `notifications_notifiers` variable structure +- Renamed `service_webhook` to `webhook` in `notifications_notifiers` variable structure + +```diff +variable "notifications_notifiers" { + type = object({ + ssm_path_prefix = optional(string, "/argocd/notifications/notifiers") +- service_github = optional(object({ +- appID = number +- installationID = number +- privateKey = optional(string) +- })) + # service.webhook.: +- service_webhook = optional(map( ++ webhook = optional(map( + object({ + url = string + headers = optional(list( + }) + )) + }) +``` + +- Removed `github` from `notifications_templates` variable structure + +```diff +variable "notifications_templates" { + type = map(object({ + message = string + alertmanager = optional(object({ + labels = map(string) + annotations = map(string) + generatorURL = string + })) +- github = optional(object({ +- status = object({ +- state = string +- label = string +- targetURL = string +- }) +- })) + webhook = optional(map( + object({ + method = optional(string) + path = optional(string) + body = optional(string) + }) + )) + })) +``` diff --git a/modules/eks/argocd/README.md b/modules/eks/argocd/README.md new file mode 100644 index 000000000..26b28cbd0 --- /dev/null +++ b/modules/eks/argocd/README.md @@ -0,0 +1,622 @@ +--- +tags: + - component/eks/argocd + - layer/software-delivery + - provider/aws + - provider/helm +--- + +# Component: `eks/argocd` + +This component is responsible for provisioning [Argo CD](https://argoproj.github.io/cd/). + +Argo CD is a declarative, GitOps continuous delivery tool for Kubernetes. + +> :warning::warning::warning: ArgoCD CRDs must be installed separately from this component/helm release. +> :warning::warning::warning: + +```shell +kubectl apply -k "https://github.com/argoproj/argo-cd/manifests/crds?ref=" + +# Eg. version v2.4.9 +kubectl apply -k "https://github.com/argoproj/argo-cd/manifests/crds?ref=v2.4.9" +``` + +## Usage + +### Preparing AppProject repos: + +First, make sure you have a GitHub repo ready to go. We have a component for this called the `argocd-repo` component. 
It +will create a GitHub repo and adds some secrets and code owners. Most importantly, it configures an +`applicationset.yaml` that includes all the details for helm to create ArgoCD CRDs. These CRDs let ArgoCD know how to +fulfill changes to its repo. + +```yaml +components: + terraform: + argocd-repo-defaults: + metadata: + type: abstract + vars: + enabled: true + github_user: acme_admin + github_user_email: infra@acme.com + github_organization: ACME + github_codeowner_teams: + - "@ACME/acme-admins" + - "@ACME/CloudPosse" + - "@ACME/developers" + gitignore_entries: + - "**/.DS_Store" + - ".DS_Store" + - "**/.vscode" + - "./vscode" + - ".idea/" + - ".vscode/" + permissions: + - team_slug: acme-admins + permission: admin + - team_slug: CloudPosse + permission: admin + - team_slug: developers + permission: push +``` + +### Injecting infrastructure details into applications + +Second, your application repos could use values to best configure their helm releases. We have an `eks/platform` +component for exposing various infra outputs. It takes remote state lookups and stores them into SSM. We demonstrate how +to pull the platform SSM parameters later. Here's an example `eks/platform` config: + +```yaml +components: + terraform: + eks/platform: + metadata: + type: abstract + component: eks/platform + backend: + s3: + workspace_key_prefix: platform + deps: + - catalog/eks/cluster + - catalog/eks/alb-controller-ingress-group + - catalog/acm + vars: + enabled: true + name: "platform" + eks_component_name: eks/cluster + ssm_platform_path: /platform/%s/%s + references: + default_alb_ingress_group: + component: eks/alb-controller-ingress-group + output: .group_name + default_ingress_domain: + component: dns-delegated + environment: gbl + output: "[.zones[].name][-1]" + + eks/platform/acm: + metadata: + component: eks/platform + inherits: + - eks/platform + vars: + eks_component_name: eks/cluster + references: + default_ingress_domain: + component: acm + environment: use2 + output: .domain_name + + eks/platform/dev: + metadata: + component: eks/platform + inherits: + - eks/platform + vars: + platform_environment: dev + + acm/qa2: + settings: + spacelift: + workspace_enabled: true + metadata: + component: acm + vars: + enabled: true + name: acm-qa2 + tags: + Team: sre + Service: acm + process_domain_validation_options: true + validation_method: DNS + dns_private_zone_enabled: false + certificate_authority_enabled: false +``` + +In the previous sample we create platform settings for a `dev` platform and a `qa2` platform. Understand that these are +arbitrary titles that are used to separate the SSM parameters so that if, say, a particular hostname is needed, we can +safely select the right hostname using a moniker such as `qa2`. These otherwise are meaningless and do not need to align +with any particular stage or tenant. 
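For reference, here is a minimal sketch of how a deployment script might read these values back out of SSM. It assumes the default `ssm_platform_path` of `/platform/%s/%s` shown above and a platform named `dev`; the parameter name `default_ingress_domain` is only illustrative and depends on the `references` you configure.

```bash
# Read a single value exported by `eks/platform` (parameter name is illustrative)
aws ssm get-parameter \
  --name /platform/dev/default_ingress_domain \
  --query 'Parameter.Value' \
  --output text

# Or list everything stored under the platform path
aws ssm get-parameters-by-path \
  --path /platform/dev \
  --recursive \
  --query 'Parameters[].[Name,Value]' \
  --output table
```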
+ +### ArgoCD on SAML / AWS Identity Center (formerly aws-sso) + +Here's an example snippet for how to use this component: + +```yaml +components: + terraform: + eks/argocd: + settings: + spacelift: + workspace_enabled: true + depends_on: + - argocd-applicationset + - tenant-gbl-corp-argocd-depoy-non-prod + vars: + enabled: true + alb_group_name: argocd + alb_name: argocd + alb_logs_prefix: argocd + certificate_issuer: selfsigning-issuer + github_organization: MyOrg + oidc_enabled: false + saml_enabled: true + ssm_store_account: corp + ssm_store_account_region: us-west-2 + argocd_repo_name: argocd-deploy-non-prod + argocd_rbac_policies: + - "p, role:org-admin, applications, *, */*, allow" + - "p, role:org-admin, clusters, get, *, allow" + - "p, role:org-admin, repositories, get, *, allow" + - "p, role:org-admin, repositories, create, *, allow" + - "p, role:org-admin, repositories, update, *, allow" + - "p, role:org-admin, repositories, delete, *, allow" + # Note: the IDs for AWS Identity Center groups will change if you alter/replace them: + argocd_rbac_groups: + - group: deadbeef-dead-beef-dead-beefdeadbeef + role: admin + - group: badca7sb-add0-65ba-dca7-sbadd065badc + role: reader + chart_values: + global: + logging: + format: json + level: warn + + sso-saml/aws-sso: + settings: + spacelift: + workspace_enabled: true + metadata: + component: sso-saml-provider + vars: + enabled: true + ssm_path_prefix: "/sso/saml/aws-sso" + usernameAttr: email + emailAttr: email + groupsAttr: groups +``` + +Note, if you set up `sso-saml-provider`, you will need to restart DEX on your EKS cluster manually: + +```bash +kubectl delete pod -n argocd +``` + +The configuration above will work for AWS Identity Center if you have the following attributes in a +[Custom SAML 2.0 application](https://docs.aws.amazon.com/singlesignon/latest/userguide/samlapps.html): + +| attribute name | value | type | +| :------------- | :-------------- | :---------- | +| Subject | ${user:subject} | persistent | +| email | ${user:email} | unspecified | +| groups | ${user:groups} | unspecified | + +You will also need to assign AWS Identity Center groups to your Custom SAML 2.0 application. Make a note of each group +and replace the IDs in the `argocd_rbac_groups` var accordingly. + +### Google Workspace OIDC + +To use Google OIDC: + +```yaml +oidc_enabled: true +saml_enabled: false +oidc_providers: + google: + uses_dex: true + type: google + id: google + name: Google + serviceAccountAccess: + enabled: true + key: googleAuth.json + value: /sso/oidc/google/serviceaccount + admin_email: an_actual_user@acme.com + config: + # This filters emails when signing in with Google to only this domain. helpful for picking the right one. + hostedDomains: + - acme.com + clientID: /sso/saml/google/clientid + clientSecret: /sso/saml/google/clientsecret +``` + +### Working with ArgoCD and GitHub + +Here's a simple GitHub action that will trigger a deployment in ArgoCD: + +```yaml +# NOTE: Example will show dev, and qa2 +name: argocd-deploy +on: + push: + branches: + - main +jobs: + ci: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2.1.0 + with: + aws-region: us-east-2 + role-to-assume: arn:aws:iam::123456789012:role/github-action-worker + - name: Build + shell: bash + run: docker build -t some.docker.repo/acme/app . 
& docker push some.docker.repo/acmo/app + - name: Checkout Argo Configuration + uses: actions/checkout@v3 + with: + repository: acme/argocd-deploy-non-prod + ref: main + path: argocd-deploy-non-prod + - name: Deploy to dev + shell: bash + run: | + echo Rendering helmfile: + helmfile \ + --namespace acme-app \ + --environment dev \ + --file deploy/app/release.yaml \ + --state-values-file <(aws ssm get-parameter --name /platform/dev),<(docker image inspect some.docker.repo/acme/app) \ + template > argocd-deploy-non-prod/plat/use2-dev/apps/my-preview-acme-app/manifests/resources.yaml + echo Updating sha for app: + yq e '' -i argocd-deploy-non-prod/plat/use2-dev/apps/my-preview-acme-app/config.yaml + echo Committing new helmfile + pushd argocd-deploy-non-prod + git add --all + git commit --message 'Updating acme-app' + git push + popd +``` + +In the above example, we make a few assumptions: + +- You've already made the app in ArgoCD by creating a YAML file in your non-prod ArgoCD repo at the path + `plat/use2-dev/apps/my-preview-acme-app/config.yaml` with contents: + +```yaml +app_repository: acme/app +app_commit: deadbeefdeadbeef +app_hostname: https://some.app.endpoint/landing_page +name: my-feature-branch.acme-app +namespace: my-feature-branch +manifests: plat/use2-dev/apps/my-preview-acme-app/manifests +``` + +- you have set up `ecr` with permissions for github to push docker images to it +- you already have your `ApplicationSet` and `AppProject` crd's in `plat/use2-dev/argocd/applicationset.yaml`, which + should be generated by our `argocd-repo` component. +- your app has a [helmfile template](https://helmfile.readthedocs.io/en/latest/#templating) in `deploy/app/release.yaml` +- that helmfile template can accept both the `eks/platform` config which is pulled from ssm at the path configured in + `eks/platform/defaults` +- the helmfile template can update container resources using the output of `docker image inspect` + +### Notifications + +Here's a configuration for letting argocd send notifications back to GitHub: + +1. [Create GitHub PAT](https://docs.github.com/en/enterprise-server@3.6/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-personal-access-token) + with scope `repo:status` +2. Save the PAT to SSM `/argocd/notifications/notifiers/common/github-token` +3. Use this atmos stack configuration + +```yaml +components: + terraform: + eks/argocd/notifications: + metadata: + component: eks/argocd + vars: + github_default_notifications_enabled: true +``` + +### Webhook + +Here's a configuration Github notify ArgoCD on commit: + +1. [Create GitHub PAT](https://docs.github.com/en/enterprise-server@3.6/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-personal-access-token) + with scope `admin:repo_hook` +2. Save the PAT to SSM `/argocd/github/api_key` +3. Use this atmos stack configuration + +```yaml +components: + terraform: + eks/argocd/notifications: + metadata: + component: eks/argocd + vars: + github_webhook_enabled: true +``` + +#### Creating Webhooks with `github-webhook` + +If you are creating webhooks for ArgoCD deployment repos in multiple GitHub Organizations, you cannot use the same +Terraform GitHub provider. Instead, we can use Atmos to deploy multiple component. 
To do this, disable the webhook +creation in this component and deploy the webhook with the `github-webhook` component as such: + +```yaml +components: + terraform: + eks/argocd: + metadata: + component: eks/argocd + inherits: + - eks/argocd/defaults + vars: + github_webhook_enabled: true # create webhook value; required for argo-cd chart + create_github_webhook: false # created with github-webhook + argocd_repositories: + "argocd-deploy-non-prod/org1": # this is the name of the `argocd-repo` component for "org1" + environment: ue2 + stage: auto + tenant: core + "argocd-deploy-non-prod/org2": + environment: ue2 + stage: auto + tenant: core + + webhook/org1/argocd: + metadata: + component: github-webhook + vars: + github_organization: org1 + github_repository: argocd-deploy-non-prod + webhook_url: "https://argocd.ue2.dev.plat.acme.org/api/webhook" + ssm_github_webhook: "/argocd/github/webhook" + + webhook/org2/argocd: + metadata: + component: github-webhook + vars: + github_organization: org2 + github_repository: argocd-deploy-non-prod + webhook_url: "https://argocd.ue2.dev.plat.acme.org/api/webhook" + ssm_github_webhook: "/argocd/github/webhook" +``` + +### Slack Notifications + +ArgoCD supports Slack notifications on application deployments. + +1. In order to enable Slack notifications, first create a Slack Application following the + [ArgoCD documentation](https://argocd-notifications.readthedocs.io/en/stable/services/slack/). +1. Create an OAuth token for the new Slack App +1. Save the OAuth token to AWS SSM Parameter Store in the same account and region as Github tokens. For example, + `core-use2-auto` +1. Add the app to the chosen Slack channel. _If not added, notifications will not work_ +1. For this component, enable Slack integrations for each Application with `var.slack_notifications_enabled` and + `var.slack_notifications`: + +```yaml +slack_notifications_enabled: true +slack_notifications: + channel: argocd-updates +``` + +6. In the `argocd-repo` component, set `var.slack_notifications_channel` to the name of the Slack notification channel + to add the relevant ApplicationSet annotations + +## Troubleshooting + +## Login to ArgoCD admin UI + +For ArgoCD v1.9 and later, the initial admin password is available from a Kubernetes secret named +`argocd-initial-admin-secret`. To get the initial password, execute the following command: + +```shell +kubectl get secret -n argocd argocd-initial-admin-secret -o jsonpath='{.data.password}' | base64 --decode +``` + +Then open the ArgoCD admin UI and use the username `admin` and the password obtained in the previous step to log in to +the ArgoCD admin. + +## Error "server.secretkey is missing" + +If you provision a new version of the `eks/argocd` component, and some Helm Chart values get updated, you might +encounter the error "server.secretkey is missing" in the ArgoCD admin UI. 
To fix the error, execute the following +commands: + +```shell +# Download `kubeconfig` and set EKS cluster +set-eks-cluster cluster-name + +# Restart the `argocd-server` Pods +kubectl rollout restart deploy/argocd-server -n argocd + +# Get the new admin password from the Kubernetes secret +kubectl get secret -n argocd argocd-initial-admin-secret -o jsonpath='{.data.password}' | base64 --decode +``` + +Reference: https://stackoverflow.com/questions/75046330/argo-cd-error-server-secretkey-is-missing + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [github](#requirement\_github) | >= 4.0 | +| [helm](#requirement\_helm) | >= 2.6.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.9.0, != 2.21.0 | +| [random](#requirement\_random) | >= 3.5 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | +| [aws.config\_secrets](#provider\_aws.config\_secrets) | >= 4.0 | +| [github](#provider\_github) | >= 4.0 | +| [kubernetes](#provider\_kubernetes) | >= 2.9.0, != 2.21.0 | +| [random](#provider\_random) | >= 3.5 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [argocd](#module\_argocd) | cloudposse/helm-release/aws | 0.10.1 | +| [argocd\_apps](#module\_argocd\_apps) | cloudposse/helm-release/aws | 0.10.1 | +| [argocd\_repo](#module\_argocd\_repo) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [dns\_gbl\_delegated](#module\_dns\_gbl\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [iam\_roles\_config\_secrets](#module\_iam\_roles\_config\_secrets) | ../../account-map/modules/iam-roles | n/a | +| [notifications\_notifiers](#module\_notifications\_notifiers) | cloudposse/config/yaml//modules/deepmerge | 1.0.2 | +| [notifications\_templates](#module\_notifications\_templates) | cloudposse/config/yaml//modules/deepmerge | 1.0.2 | +| [saml\_sso\_providers](#module\_saml\_sso\_providers) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [github_repository_webhook.default](https://registry.terraform.io/providers/integrations/github/latest/docs/resources/repository_webhook) | resource | +| [random_password.webhook](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/password) | resource | +| [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [aws_ssm_parameter.github_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.github_deploy_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.oidc_client_id](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.oidc_client_secret](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.slack_notifications](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | 
+| [aws_ssm_parameters_by_path.argocd_notifications](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameters_by_path) | data source | +| [kubernetes_resources.crd](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/data-sources/resources) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [admin\_enabled](#input\_admin\_enabled) | Toggles Admin user creation the deployed chart | `bool` | `false` | no | +| [alb\_group\_name](#input\_alb\_group\_name) | A name used in annotations to reuse an ALB (e.g. `argocd`) or to generate a new one | `string` | `null` | no | +| [alb\_logs\_bucket](#input\_alb\_logs\_bucket) | The name of the bucket for ALB access logs. The bucket must have policy allowing the ELB logging principal | `string` | `""` | no | +| [alb\_logs\_prefix](#input\_alb\_logs\_prefix) | `alb_logs_bucket` s3 bucket prefix | `string` | `""` | no | +| [alb\_name](#input\_alb\_name) | The name of the ALB (e.g. `argocd`) provisioned by `alb-controller`. Works together with `var.alb_group_name` | `string` | `null` | no | +| [anonymous\_enabled](#input\_anonymous\_enabled) | Toggles anonymous user access using default RBAC setting (Defaults to read-only) | `bool` | `false` | no | +| [argocd\_apps\_chart](#input\_argocd\_apps\_chart) | Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended. | `string` | `"argocd-apps"` | no | +| [argocd\_apps\_chart\_description](#input\_argocd\_apps\_chart\_description) | Set release description attribute (visible in the history). | `string` | `"A Helm chart for managing additional Argo CD Applications and Projects"` | no | +| [argocd\_apps\_chart\_repository](#input\_argocd\_apps\_chart\_repository) | Repository URL where to locate the requested chart. | `string` | `"https://argoproj.github.io/argo-helm"` | no | +| [argocd\_apps\_chart\_values](#input\_argocd\_apps\_chart\_values) | Additional values to yamlencode as `helm_release` values for the argocd\_apps chart | `any` | `{}` | no | +| [argocd\_apps\_chart\_version](#input\_argocd\_apps\_chart\_version) | Specify the exact chart version to install. If this is not specified, the latest version is installed. | `string` | `"0.0.3"` | no | +| [argocd\_apps\_enabled](#input\_argocd\_apps\_enabled) | Enable argocd apps | `bool` | `true` | no | +| [argocd\_create\_namespaces](#input\_argocd\_create\_namespaces) | ArgoCD create namespaces policy | `bool` | `false` | no | +| [argocd\_rbac\_default\_policy](#input\_argocd\_rbac\_default\_policy) | Default ArgoCD RBAC default role.

See https://argo-cd.readthedocs.io/en/stable/operator-manual/rbac/#basic-built-in-roles for more information. | `string` | `"role:readonly"` | no | +| [argocd\_rbac\_groups](#input\_argocd\_rbac\_groups) | List of ArgoCD Group Role Assignment strings to be added to the argocd-rbac configmap policy.csv item.
e.g.
[
{
group: idp-group-name,
role: argocd-role-name
},
]
becomes: `g, idp-group-name, role:argocd-role-name`
See https://argo-cd.readthedocs.io/en/stable/operator-manual/rbac/ for more information. |
list(object({
group = string,
role = string
}))
| `[]` | no | +| [argocd\_rbac\_policies](#input\_argocd\_rbac\_policies) | List of ArgoCD RBAC Permission strings to be added to the argocd-rbac configmap policy.csv item.

See https://argo-cd.readthedocs.io/en/stable/operator-manual/rbac/ for more information. | `list(string)` | `[]` | no | +| [argocd\_repositories](#input\_argocd\_repositories) | Map of objects defining an `argocd_repo` to configure. The key is the name of the ArgoCD repository. |
map(object({
environment = string # The environment where the `argocd_repo` component is deployed.
stage = string # The stage where the `argocd_repo` component is deployed.
tenant = string # The tenant where the `argocd_repo` component is deployed.
}))
| `{}` | no | +| [atomic](#input\_atomic) | If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used. | `bool` | `true` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [certificate\_issuer](#input\_certificate\_issuer) | Certificate manager cluster issuer | `string` | `"letsencrypt-staging"` | no | +| [chart](#input\_chart) | Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended. | `string` | `"argo-cd"` | no | +| [chart\_description](#input\_chart\_description) | Set release description attribute (visible in the history). | `string` | `null` | no | +| [chart\_repository](#input\_chart\_repository) | Repository URL where to locate the requested chart. | `string` | `"https://argoproj.github.io/argo-helm"` | no | +| [chart\_values](#input\_chart\_values) | Additional values to yamlencode as `helm_release` values. | `any` | `{}` | no | +| [chart\_version](#input\_chart\_version) | Specify the exact chart version to install. If this is not specified, the latest version is installed. | `string` | `"5.55.0"` | no | +| [cleanup\_on\_fail](#input\_cleanup\_on\_fail) | Allow deletion of new resources created in this upgrade when upgrade fails. | `bool` | `true` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [create\_github\_webhook](#input\_create\_github\_webhook) | Enable GitHub webhook creation

Use this to create the GitHub Webhook for the given ArgoCD repo using the value created when `var.github_webhook_enabled` is `true`. | `bool` | `true` | no | +| [create\_namespace](#input\_create\_namespace) | Create the namespace if it does not yet exist. Defaults to `false`. | `bool` | `false` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [forecastle\_enabled](#input\_forecastle\_enabled) | Toggles Forecastle integration in the deployed chart | `bool` | `false` | no | +| [github\_base\_url](#input\_github\_base\_url) | This is the target GitHub base API endpoint. Providing a value is a requirement when working with GitHub Enterprise. It is optional to provide this value and it can also be sourced from the `GITHUB_BASE_URL` environment variable. The value must end with a slash, for example: `https://terraformtesting-ghe.westus.cloudapp.azure.com/` | `string` | `null` | no | +| [github\_default\_notifications\_enabled](#input\_github\_default\_notifications\_enabled) | Enable default GitHub commit statuses notifications (required for CD sync mode) | `bool` | `true` | no | +| [github\_organization](#input\_github\_organization) | GitHub Organization | `string` | n/a | yes | +| [github\_token\_override](#input\_github\_token\_override) | Use the value of this variable as the GitHub token instead of reading it from SSM | `string` | `null` | no | +| [github\_webhook\_enabled](#input\_github\_webhook\_enabled) | Enable GitHub webhook integration

Use this to create a secret value and pass it to the argo-cd chart | `bool` | `true` | no |
+| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can be seen in the plan | `bool` | `false` | no |
+| [host](#input\_host) | Host name to use for ingress and ALB | `string` | `""` | no |
+| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | +| [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | +| [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | +| [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | +| [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | +| [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | +| [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | +| [kubernetes\_namespace](#input\_kubernetes\_namespace) | The namespace to install the release into. | `string` | `"argocd"` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no |
+| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no |
+| [notifications\_notifiers](#input\_notifications\_notifiers) | Notification Notifiers to configure.

See: https://argocd-notifications.readthedocs.io/en/stable/triggers/
See: [Example value in argocd-notifications Helm Chart](https://github.com/argoproj/argo-helm/blob/a0a74fb43d147073e41aadc3d88660b312d6d638/charts/argocd-notifications/values.yaml#L352) |
object({
ssm_path_prefix = optional(string, "/argocd/notifications/notifiers")
# service.webhook.:
webhook = optional(map(
object({
url = string
headers = optional(list(
object({
name = string
value = string
})
), [])
insecureSkipVerify = optional(bool, false)
})
))
})
| `{}` | no | +| [notifications\_templates](#input\_notifications\_templates) | Notification Templates to configure.

See: https://argocd-notifications.readthedocs.io/en/stable/templates/
See: [Example value in argocd-notifications Helm Chart](https://github.com/argoproj/argo-helm/blob/a0a74fb43d147073e41aadc3d88660b312d6d638/charts/argocd-notifications/values.yaml#L158) |
map(object({
message = string
alertmanager = optional(object({
labels = map(string)
annotations = map(string)
generatorURL = string
}))
webhook = optional(map(
object({
method = optional(string)
path = optional(string)
body = optional(string)
})
))
}))
| `{}` | no | +| [notifications\_triggers](#input\_notifications\_triggers) | Notification Triggers to configure.

See: https://argocd-notifications.readthedocs.io/en/stable/triggers/
See: [Example value in argocd-notifications Helm Chart](https://github.com/argoproj/argo-helm/blob/a0a74fb43d147073e41aadc3d88660b312d6d638/charts/argocd-notifications/values.yaml#L352) |
map(list(
object({
oncePer = optional(string)
send = list(string)
when = string
})
))
| `{}` | no | +| [oidc\_enabled](#input\_oidc\_enabled) | Toggles OIDC integration in the deployed chart | `bool` | `false` | no | +| [oidc\_issuer](#input\_oidc\_issuer) | OIDC issuer URL | `string` | `""` | no | +| [oidc\_name](#input\_oidc\_name) | Name of the OIDC resource | `string` | `""` | no | +| [oidc\_rbac\_scopes](#input\_oidc\_rbac\_scopes) | OIDC RBAC scopes to request | `string` | `"[argocd_realm_access]"` | no | +| [oidc\_requested\_scopes](#input\_oidc\_requested\_scopes) | Set of OIDC scopes to request | `string` | `"[\"openid\", \"profile\", \"email\", \"groups\"]"` | no | +| [rbac\_enabled](#input\_rbac\_enabled) | Enable Service Account for pods. | `bool` | `true` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region. | `string` | n/a | yes | +| [resources](#input\_resources) | The cpu and memory of the deployment's limits and requests. |
object({
limits = object({
cpu = string
memory = string
})
requests = object({
cpu = string
memory = string
})
})
| `null` | no | +| [saml\_enabled](#input\_saml\_enabled) | Toggles SAML integration in the deployed chart | `bool` | `false` | no | +| [saml\_rbac\_scopes](#input\_saml\_rbac\_scopes) | SAML RBAC scopes to request | `string` | `"[email,groups]"` | no | +| [saml\_sso\_providers](#input\_saml\_sso\_providers) | SAML SSO providers components |
map(object({
component = string
environment = optional(string, null)
}))
| `{}` | no | +| [service\_type](#input\_service\_type) | Service type for exposing the ArgoCD service. The available type values and their behaviors are:
ClusterIP: Exposes the Service on a cluster-internal IP. Choosing this value makes the Service only reachable from within the cluster.
NodePort: Exposes the Service on each Node's IP at a static port (the NodePort).
LoadBalancer: Exposes the Service externally using a cloud provider's load balancer. | `string` | `"NodePort"` | no | +| [slack\_notifications](#input\_slack\_notifications) | ArgoCD Slack notification configuration. Requires Slack Bot created with token stored at the given SSM Parameter path.

See: https://argocd-notifications.readthedocs.io/en/stable/services/slack/ |
object({
token_ssm_path = optional(string, "/argocd/notifications/notifiers/slack/token")
api_url = optional(string, null)
username = optional(string, "ArgoCD")
icon = optional(string, null)
})
| `{}` | no | +| [slack\_notifications\_enabled](#input\_slack\_notifications\_enabled) | Whether or not to enable Slack notifications. See `var.slack_notifications.` | `bool` | `false` | no | +| [ssm\_github\_api\_key](#input\_ssm\_github\_api\_key) | SSM path to the GitHub API key | `string` | `"/argocd/github/api_key"` | no | +| [ssm\_oidc\_client\_id](#input\_ssm\_oidc\_client\_id) | The SSM Parameter Store path for the ID of the IdP client | `string` | `"/argocd/oidc/client_id"` | no | +| [ssm\_oidc\_client\_secret](#input\_ssm\_oidc\_client\_secret) | The SSM Parameter Store path for the secret of the IdP client | `string` | `"/argocd/oidc/client_secret"` | no | +| [ssm\_store\_account](#input\_ssm\_store\_account) | Account storing SSM parameters | `string` | n/a | yes | +| [ssm\_store\_account\_region](#input\_ssm\_store\_account\_region) | AWS region storing SSM parameters | `string` | n/a | yes | +| [ssm\_store\_account\_tenant](#input\_ssm\_store\_account\_tenant) | Tenant of the account storing SSM parameters.

If the tenant label is not used, leave this as null. | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [timeout](#input\_timeout) | Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds | `number` | `300` | no | +| [wait](#input\_wait) | Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`. | `bool` | `true` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [github\_webhook\_value](#output\_github\_webhook\_value) | The value of the GitHub webhook secret used for ArgoCD | + + + +## References + +- [Argo CD](https://argoproj.github.io/cd/) +- [Argo CD Docs](https://argo-cd.readthedocs.io/en/stable/) +- [Argo Helm Chart](https://github.com/argoproj/argo-helm/blob/master/charts/argo-cd/) + +[](https://cpco.io/component) diff --git a/modules/eks/argocd/context.tf b/modules/eks/argocd/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/eks/argocd/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/eks/argocd/data.tf b/modules/eks/argocd/data.tf new file mode 100644 index 000000000..212c20e51 --- /dev/null +++ b/modules/eks/argocd/data.tf @@ -0,0 +1,45 @@ +locals { + oidc_client_id = local.oidc_enabled ? data.aws_ssm_parameter.oidc_client_id[0].value : "" + oidc_client_secret = local.oidc_enabled ? data.aws_ssm_parameter.oidc_client_secret[0].value : "" +} + +# NOTE: OIDC parameters are global, hence why they use a separate AWS provider + +# +# These variables are depreciated but should not yet be removed. Future iterations of this component will delete these variables +# + +data "aws_ssm_parameter" "oidc_client_id" { + count = local.oidc_enabled_count + name = var.ssm_oidc_client_id + with_decryption = true + + provider = aws.config_secrets +} + +data "aws_ssm_parameter" "oidc_client_secret" { + count = local.oidc_enabled_count + name = var.ssm_oidc_client_secret + with_decryption = true + + provider = aws.config_secrets +} + +data "aws_ssm_parameter" "github_deploy_key" { + for_each = local.enabled ? var.argocd_repositories : {} + + name = local.enabled ? format( + module.argocd_repo[each.key].outputs.deploy_keys_ssm_path_format, + format( + "${module.this.tenant != null ? "%[1]s/" : ""}%[2]s-%[3]s${length(module.this.attributes) > 0 ? "-%[4]s" : "%[4]s"}", + module.this.tenant, + module.this.environment, + module.this.stage, + join("-", module.this.attributes) + ) + ) : null + + with_decryption = true + + provider = aws.config_secrets +} diff --git a/modules/eks/argocd/github_webhook.tf b/modules/eks/argocd/github_webhook.tf new file mode 100644 index 000000000..b0696ee38 --- /dev/null +++ b/modules/eks/argocd/github_webhook.tf @@ -0,0 +1,59 @@ +# The GitHub webhook can be created with this component, with another component (such as github-webhook), or manually. +# However, we need to define the value for the webhook secret now with the ArgoCD chart deployment. Store it in SSM for reference. 
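As a rough, hedged illustration of the toggle described in this comment block, the sketch below shows how the two webhook inputs could be combined in an Atmos-style stack manifest: the secret value is generated and stored by this component, while the webhook itself is registered elsewhere, as the next comment explains for the multi-organization case. The stack layout, organization name, and repository key are assumptions for illustration, not values taken from this change.

```yaml
# Hypothetical stack manifest snippet; all names below are placeholders.
components:
  terraform:
    eks/argocd:
      vars:
        github_organization: example-org   # placeholder GitHub organization
        github_webhook_enabled: true       # generate a webhook secret and pass it to the argo-cd chart
        create_github_webhook: false       # let a separate component (or a manual step) register the webhook
        argocd_repositories:
          argocd-deploy-non-prod:          # key of the argocd-repo component instance read via remote state
            environment: ue2
            stage: auto
            tenant: core
```

Other required inputs of the component (for example `region`, `ssm_store_account`, and `ssm_store_account_region`) are omitted from this sketch for brevity.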
+# +# We need to create the webhook with a separate component if we're deploying argocd-repos for multiple GitHub Organizations +locals { + github_webhook_enabled = local.enabled && var.github_webhook_enabled + create_github_webhook = local.github_webhook_enabled && var.create_github_webhook + + webhook_github_secret = local.github_webhook_enabled ? try(random_password.webhook["github"].result, null) : "" +} + +variable "github_webhook_enabled" { + type = bool + default = true + description = < { + clone_url = module.argocd_repo[k].outputs.repository_ssh_clone_url + github_deploy_key = data.aws_ssm_parameter.github_deploy_key[k].value + repository = module.argocd_repo[k].outputs.repository + } + } : {} + + credential_templates = flatten(concat([ + for k, v in local.argocd_repositories : [ + { + name = "configs.credentialTemplates.${k}.url" + value = v.clone_url + type = "string" + }, + { + name = "configs.credentialTemplates.${k}.sshPrivateKey" + value = nonsensitive(v.github_deploy_key) + type = "string" + }, + ] + ], + [ + for s, v in local.notifications_notifiers_ssm_configs : [ + for k, i in v : [ + { + name = "notifications.secret.items.${s}_${k}" + value = i + type = "string" + } + ] + ] + ], + local.github_webhook_enabled ? [ + { + name = "configs.secret.githubSecret" + value = nonsensitive(local.webhook_github_secret) + type = "string" + } + ] : [], + local.slack_notifications_enabled ? [ + { + name = "notifications.secret.items.slack-token" + value = data.aws_ssm_parameter.slack_notifications[0].value + type = "string" + } + ] : [] + )) + regional_service_discovery_domain = "${module.this.environment}.${module.dns_gbl_delegated.outputs.default_domain_name}" + host = var.host != "" ? var.host : format("%s.%s", var.name, local.regional_service_discovery_domain) + url = format("https://%s", local.host) + + oidc_config_map = local.oidc_enabled ? { + server : { + config : { + "oidc.config" = <<-EOT + name: ${var.oidc_name} + issuer: ${var.oidc_issuer} + clientID: ${local.oidc_client_id} + clientSecret: ${local.oidc_client_secret} + requestedScopes: ${var.oidc_requested_scopes} + EOT + } + } + } : {} + + saml_config_map = local.saml_enabled ? { + configs : { + params : { + "dexserver.disable.tls" = true + } + cm : { + "url" = local.url + "dex.config" = join("\n", [ + local.dex_config_connectors + ]) + } + } + } : {} + + dex_config_connectors = yamlencode({ + connectors = [ + for name, config in(local.enabled ? 
var.saml_sso_providers : {}) : + { + type = "saml" + id = "saml" + name = name + config = { + ssoURL = module.saml_sso_providers[name].outputs.url + caData = base64encode(format("-----BEGIN CERTIFICATE-----\n%s\n-----END CERTIFICATE-----", module.saml_sso_providers[name].outputs.ca)) + redirectURI = format("https://%s/api/dex/callback", local.host) + entityIssuer = format("https://%s/api/dex/callback", local.host) + usernameAttr = module.saml_sso_providers[name].outputs.usernameAttr + emailAttr = module.saml_sso_providers[name].outputs.emailAttr + groupsAttr = module.saml_sso_providers[name].outputs.groupsAttr + ssoIssuer = module.saml_sso_providers[name].outputs.issuer + } + } + ] + } + ) +} + +module "argocd" { + source = "cloudposse/helm-release/aws" + version = "0.10.1" + + name = "argocd" # avoids hitting length restrictions on IAM Role names + chart = var.chart + repository = var.chart_repository + description = var.chart_description + chart_version = var.chart_version + kubernetes_namespace = local.kubernetes_namespace + create_namespace = var.create_namespace + wait = var.wait + atomic = var.atomic + cleanup_on_fail = var.cleanup_on_fail + timeout = var.timeout + + eks_cluster_oidc_issuer_url = replace(module.eks.outputs.eks_cluster_identity_oidc_issuer, "https://", "") + + service_account_name = module.this.name + service_account_namespace = var.kubernetes_namespace + + set_sensitive = local.credential_templates + + values = compact([ + # standard k8s object settings + yamlencode({ + fullnameOverride = module.this.name, + serviceAccount = { + name = module.this.name + }, + resources = var.resources + rbac = { + create = var.rbac_enabled + } + }), + # argocd-specific settings + templatefile( + "${path.module}/resources/argocd-values.yaml.tpl", + { + admin_enabled = var.admin_enabled + anonymous_enabled = var.anonymous_enabled + alb_group_name = var.alb_group_name == null ? "" : var.alb_group_name + alb_logs_bucket = var.alb_logs_bucket + alb_logs_prefix = var.alb_logs_prefix + alb_name = var.alb_name == null ? 
"" : var.alb_name + application_repos = { for k, v in local.argocd_repositories : k => v.clone_url } + argocd_host = local.host + cert_issuer = var.certificate_issuer + forecastle_enabled = var.forecastle_enabled + ingress_host = local.host + name = module.this.name + oidc_enabled = local.oidc_enabled + oidc_rbac_scopes = var.oidc_rbac_scopes + saml_enabled = local.saml_enabled + saml_rbac_scopes = var.saml_rbac_scopes + service_type = var.service_type + rbac_default_policy = var.argocd_rbac_default_policy + rbac_policies = var.argocd_rbac_policies + rbac_groups = var.argocd_rbac_groups + } + ), + # argocd-notifications specific settings + templatefile( + "${path.module}/resources/argocd-notifications-values.yaml.tpl", + { + argocd_host = "https://${local.host}" + configs-hash = md5(jsonencode(local.notifications)) + secrets-hash = md5(jsonencode(local.notifications_notifiers_ssm_configs)) + } + ), + yamlencode(local.notifications), + yamlencode(merge( + local.oidc_config_map, + local.saml_config_map, + )), + yamlencode(var.chart_values) + ]) + + context = module.this.context +} + +data "kubernetes_resources" "crd" { + api_version = "apiextensions.k8s.io/v1" + kind = "CustomResourceDefinition" + field_selector = "metadata.name==applications.argoproj.io" +} + +module "argocd_apps" { + source = "cloudposse/helm-release/aws" + version = "0.10.1" + + name = "" # avoids hitting length restrictions on IAM Role names + chart = var.argocd_apps_chart + repository = var.argocd_apps_chart_repository + description = var.argocd_apps_chart_description + chart_version = var.argocd_apps_chart_version + kubernetes_namespace = var.kubernetes_namespace + create_namespace = var.create_namespace + wait = var.wait + atomic = var.atomic + cleanup_on_fail = var.cleanup_on_fail + timeout = var.timeout + enabled = local.enabled && var.argocd_apps_enabled && length(data.kubernetes_resources.crd.objects) > 0 + eks_cluster_oidc_issuer_url = replace(module.eks.outputs.eks_cluster_identity_oidc_issuer, "https://", "") + values = compact([ + templatefile( + "${path.module}/resources/argocd-apps-values.yaml.tpl", + { + application_repos = { for k, v in local.argocd_repositories : k => v.clone_url } + create_namespaces = var.argocd_create_namespaces + namespace = local.kubernetes_namespace + tenant = module.this.tenant + environment = var.environment + stage = var.stage + attributes = var.attributes + } + ), + yamlencode(var.argocd_apps_chart_values) + ]) + + depends_on = [ + module.argocd + ] +} diff --git a/modules/eks/argocd/notifications.tf b/modules/eks/argocd/notifications.tf new file mode 100644 index 000000000..3e213d90f --- /dev/null +++ b/modules/eks/argocd/notifications.tf @@ -0,0 +1,319 @@ +data "aws_ssm_parameters_by_path" "argocd_notifications" { + for_each = local.notifications_notifiers_ssm_path + path = each.value + with_decryption = true +} + +data "aws_ssm_parameter" "slack_notifications" { + provider = aws.config_secrets + count = local.slack_notifications_enabled ? 1 : 0 + + name = var.slack_notifications.token_ssm_path + with_decryption = true +} + +module "notifications_templates" { + source = "cloudposse/config/yaml//modules/deepmerge" + version = "1.0.2" + + count = local.enabled ? 1 : 0 + + maps = [ + var.notifications_templates, + local.github_notifications_enabled ? { + app-deploy-succeded = { + message = "Application {{ .app.metadata.name }} is now running new version of deployments manifests." 
+ webhook = { + app-repo-github-commit-status = { + for k, v in local.notifications_template_app_github_commit_status : + k => k == "body" ? jsonencode(merge(v, { state = "success" })) : tostring(v) + } + argocd-repo-github-commit-status = { + for k, v in local.notifications_template_argocd_repo_github_commit_status : + k => k == "body" ? jsonencode(merge(v, { state = "success" })) : tostring(v) + } + } + } + app-deploy-started = { + message = "Application {{ .app.metadata.name }} is now running new version of deployments manifests." + webhook = { + app-repo-github-commit-status = { + for k, v in local.notifications_template_app_github_commit_status : + k => k == "body" ? jsonencode(merge(v, { state = "pending" })) : tostring(v) + } + argocd-repo-github-commit-status = { + for k, v in local.notifications_template_argocd_repo_github_commit_status : + k => k == "body" ? jsonencode(merge(v, { state = "pending" })) : tostring(v) + } + } + } + app-deploy-failed = { + message = "Application {{ .app.metadata.name }} failed deploying new version." + webhook = { + app-repo-github-commit-status = { + for k, v in local.notifications_template_app_github_commit_status : + k => k == "body" ? jsonencode(merge(v, { state = "error" })) : tostring(v) + } + argocd-repo-github-commit-status = { + for k, v in local.notifications_template_argocd_repo_github_commit_status : + k => k == "body" ? jsonencode(merge(v, { state = "error" })) : tostring(v) + } + } + } + } : {}, + local.slack_notifications_enabled ? { + app-created = { + message = "Application {{ .app.metadata.name }} has been created." + slack = { + attachments = templatefile("${path.module}/resources/argocd-slack-message.tpl", + { + color = "#00ff00" + } + ) + } + }, + app-deleted = { + message = "Application {{ .app.metadata.name }} was deleted." + slack = { + attachments = templatefile("${path.module}/resources/argocd-slack-message.tpl", + { + color = "#FFA500" + } + ) + } + }, + app-success = { + message = "Application {{ .app.metadata.name }} deployment was successful!" + slack = { + attachments = templatefile("${path.module}/resources/argocd-slack-message.tpl", + { + color = "#00ff00" + } + ) + } + }, + app-failure = { + message = "Application {{ .app.metadata.name }} deployment failed!" + slack = { + attachments = templatefile("${path.module}/resources/argocd-slack-message.tpl", + { + color = "#FF0000" + } + ) + } + }, + app-started = { + message = "Application {{ .app.metadata.name }} started deployment..." + slack = { + attachments = templatefile("${path.module}/resources/argocd-slack-message.tpl", + { + color = "#0000ff" + } + ) + } + }, + app-health-degraded = { + message = "Application {{ .app.metadata.name }} health has degraded!" + slack = { + attachments = templatefile("${path.module}/resources/argocd-slack-message.tpl", + { + color = "#FF0000" + } + ) + } + } + } : {} + ] +} + +module "notifications_notifiers" { + source = "cloudposse/config/yaml//modules/deepmerge" + version = "1.0.2" + + count = local.enabled ? 1 : 0 + + maps = [ + var.notifications_notifiers, + local.github_notifications_enabled ? { + webhook = { + app-repo-github-commit-status = local.notification_default_notifier_github_commit_status + argocd-repo-github-commit-status = local.notification_default_notifier_github_commit_status + } + } : {}, + local.slack_notifications_enabled ? 
{ + slack = local.notification_slack_service + } : {} + ] +} + +locals { + github_notifications_enabled = local.enabled && var.github_default_notifications_enabled + slack_notifications_enabled = local.enabled && var.slack_notifications_enabled + + notification_default_notifier_github_commit_status = { + url = "https://api.github.com" + headers = [ + { + name = "Authorization" + value = "token $common_github-token" + } + ] + insecureSkipVerify = false + } + + notification_slack_service = { + apiURL = var.slack_notifications.api_url + token = "$slack-token" + username = var.slack_notifications.username + icon = var.slack_notifications.icon + } + + notifications_notifiers = jsondecode(local.enabled ? jsonencode(module.notifications_notifiers[0].merged) : jsonencode({})) + + ## Get list of notifiers services + notifications_notifiers_variables = merge( + { + for key, value in local.notifications_notifiers : + key => { for param_name, param_value in value : param_name => param_value if param_value != null } + if key != "ssm_path_prefix" && key != "webhook" + }, + { + for key, value in coalesce(local.notifications_notifiers.webhook, {}) : + format("webhook_%s", key) => + { for param_name, param_value in value : param_name => param_value if param_value != null } + } + ) + + ## Get paths to read configs for each notifier service + notifications_notifiers_ssm_path = merge( + { + for key, value in local.notifications_notifiers_variables : + key => format("%s/%s/", local.notifications_notifiers.ssm_path_prefix, key) + }, + { + common = format("%s/common/", local.notifications_notifiers.ssm_path_prefix) + }, + ) + + ## Read SSM secrets into object for each notifier service + notifications_notifiers_ssm_configs = { + for key, value in data.aws_ssm_parameters_by_path.argocd_notifications : + key => zipmap( + [for name in value.names : trimprefix(name, local.notifications_notifiers_ssm_path[key])], + nonsensitive(value.values) + ) + } + + ## Define notifier service object with placeholders as values. This is ArgoCD convention + notifications_notifiers_ssm_configs_keys = { + for key, value in data.aws_ssm_parameters_by_path.argocd_notifications : + key => zipmap( + [for name in value.names : trimprefix(name, local.notifications_notifiers_ssm_path[key])], + [for name in value.names : format("$%s_%s", key, trimprefix(name, local.notifications_notifiers_ssm_path[key]))] + ) + } + + notifications_template_github_commit_status = { + method = "POST" + body = { + description = "ArgoCD" + target_url = "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}" + context = "continuous-delivery/{{.app.metadata.name}}" + } + } + + notifications_template_app_github_commit_status = merge(local.notifications_template_github_commit_status, { + path = "/repos/{{call .repo.FullNameByRepoURL .app.metadata.annotations.app_repository}}/statuses/{{.app.metadata.annotations.app_commit}}" + }) + + notifications_template_argocd_repo_github_commit_status = merge(local.notifications_template_github_commit_status, { + path = "/repos/{{call .repo.FullNameByRepoURL .app.spec.source.repoURL}}/statuses/{{.app.status.operationState.operation.sync.revision}}" + }) + + notifications_templates = jsondecode(local.enabled ? jsonencode(module.notifications_templates[0].merged) : jsonencode({})) + + notifications_default_triggers = merge(local.github_notifications_enabled ? 
{ + on-deploy-started = [ + { + when = "app.status.operationState.phase in ['Running'] or ( app.status.operationState.phase == 'Succeeded' and app.status.health.status == 'Progressing' )" + oncePer = "app.status.sync.revision" + send = ["app-deploy-started"] + } + ], + on-deploy-succeded = [ + { + when = "app.status.operationState.phase == 'Succeeded' and app.status.health.status == 'Healthy'" + oncePer = "app.status.sync.revision" + send = ["app-deploy-succeded"] + } + ], + on-deploy-failed = [ + { + when = "app.status.operationState.phase in ['Error', 'Failed' ] or ( app.status.operationState.phase == 'Succeeded' and app.status.health.status == 'Degraded' )" + oncePer = "app.status.sync.revision" + send = ["app-deploy-failed"] + } + ] + } : {}, + local.slack_notifications_enabled ? { + # Full catalog of notification triggers as default + # https://github.com/argoproj/argo-cd/tree/master/notifications_catalog/triggers + on-created = [ + { + when = "true" + send = ["app-created"] + oncePer = "app.metadata.name" + } + ], + on-deleted = [ + { + when = "app.metadata.deletionTimestamp != nil" + send = ["app-deleted"] + oncePer = "app.metadata.deletionTimestamp" + } + ], + on-success = [ + { + when = "app.status.operationState != nil and app.status.operationState.phase in ['Succeeded'] and app.status.health.status == 'Healthy'" + send = ["app-success"] + oncePer = "app.status.operationState?.syncResult?.revision" + } + ], + on-failure = [ + { + when = "app.status.operationState != nil and (app.status.operationState.phase in ['Error', 'Failed'] or app.status.sync.status == 'Unknown')" + send = ["app-failure"] + oncePer = "app.status.operationState?.syncResult?.revision" + } + ], + on-health-degraded = [ + { + when = "app.status.health.status == 'Degraded'" + send = ["app-health-degraded"] + oncePer = "app.status.operationState?.syncResult?.revision" + } + ], + on-started = [ + { + when = "app.status.operationState != nil and app.status.operationState.phase in ['Running']" + send = ["app-started"] + oncePer = "app.status.operationState?.syncResult?.revision" + } + ] + } : {} + ) + + notifications_triggers = merge(var.notifications_triggers, local.notifications_default_triggers) + + notifications = { + notifications = { + templates = { for key, value in local.notifications_templates : format("template.%s", key) => yamlencode(value) } + triggers = { for key, value in local.notifications_triggers : format("trigger.%s", key) => yamlencode(value) } + notifiers = { + for key, value in local.notifications_notifiers_variables : + format("service.%s", replace(key, "_", ".")) => + yamlencode(merge(local.notifications_notifiers_ssm_configs_keys[key], value)) + } + } + } +} diff --git a/modules/eks/argocd/outputs.tf b/modules/eks/argocd/outputs.tf new file mode 100644 index 000000000..043ed3fdf --- /dev/null +++ b/modules/eks/argocd/outputs.tf @@ -0,0 +1,5 @@ +output "github_webhook_value" { + description = "The value of the GitHub webhook secret used for ArgoCD" + sensitive = true + value = local.webhook_github_secret +} diff --git a/modules/eks/argocd/provider-github.tf b/modules/eks/argocd/provider-github.tf new file mode 100644 index 000000000..7c99b6240 --- /dev/null +++ b/modules/eks/argocd/provider-github.tf @@ -0,0 +1,36 @@ +variable "github_base_url" { + type = string + description = "This is the target GitHub base API endpoint. Providing a value is a requirement when working with GitHub Enterprise. 
It is optional to provide this value and it can also be sourced from the `GITHUB_BASE_URL` environment variable. The value must end with a slash, for example: `https://terraformtesting-ghe.westus.cloudapp.azure.com/`" + default = null +} + +variable "ssm_github_api_key" { + type = string + description = "SSM path to the GitHub API key" + default = "/argocd/github/api_key" +} + +variable "github_token_override" { + type = string + description = "Use the value of this variable as the GitHub token instead of reading it from SSM" + default = null +} + +locals { + github_token = local.create_github_webhook ? coalesce(var.github_token_override, try(data.aws_ssm_parameter.github_api_key[0].value, null)) : "" +} + +data "aws_ssm_parameter" "github_api_key" { + count = local.create_github_webhook ? 1 : 0 + name = var.ssm_github_api_key + with_decryption = true + + #provider = aws.config_secrets +} + +# We will only need the github provider if we are creating the GitHub webhook with github_repository_webhook. +provider "github" { + base_url = local.create_github_webhook ? var.github_base_url : null + owner = local.create_github_webhook ? var.github_organization : null + token = local.create_github_webhook ? local.github_token : null +} diff --git a/modules/eks/argocd/provider-helm.tf b/modules/eks/argocd/provider-helm.tf new file mode 100644 index 000000000..91cc7f6d4 --- /dev/null +++ b/modules/eks/argocd/provider-helm.tf @@ -0,0 +1,201 @@ +################## +# +# This file is a drop-in to provide a helm provider. +# +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# +# All the following variables are just about configuring the Kubernetes provider +# to be able to modify EKS cluster. The reason there are so many options is +# because at various times, each one of them has had problems, so we give you a choice. +# +# The reason there are so many "enabled" inputs rather than automatically +# detecting whether or not they are enabled based on the value of the input +# is that any logic based on input values requires the values to be known during +# the "plan" phase of Terraform, and often they are not, which causes problems. +# +variable "kubeconfig_file_enabled" { + type = bool + default = false + description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false +} + +variable "kubeconfig_file" { + type = string + default = "" + description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false +} + +variable "kubeconfig_context" { + type = string + default = "" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. 
+ EOT + nullable = false +} + +variable "kube_data_auth_enabled" { + type = bool + default = false + description = <<-EOT + If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. + EOT + nullable = false +} + +variable "kube_exec_auth_enabled" { + type = bool + default = true + description = <<-EOT + If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. + EOT + nullable = false +} + +variable "kube_exec_auth_role_arn" { + type = string + default = "" + description = "The role ARN for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_role_arn_enabled" { + type = bool + default = true + description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false +} + +variable "kube_exec_auth_aws_profile" { + type = string + default = "" + description = "The AWS config profile for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_aws_profile_enabled" { + type = bool + default = false + description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false +} + +variable "kubeconfig_exec_auth_api_version" { + type = string + default = "client.authentication.k8s.io/v1beta1" + description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false +} + +variable "helm_manifest_experiment_enabled" { + type = bool + default = false + description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false +} + +locals { + kubeconfig_file_enabled = var.kubeconfig_file_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + + # Eventually we might try to get this from an environment variable + kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version + + exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? [ + "--profile", var.kube_exec_auth_aws_profile + ] : [] + + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) + exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ + "--role-arn", local.kube_exec_auth_role_arn + ] : [] + + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. 
+ eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") +} + +data "aws_eks_cluster_auth" "eks" { + count = local.kube_data_auth_enabled ? 1 : 0 + name = local.eks_cluster_id +} + +provider "helm" { + kubernetes { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } + } + experiments { + manifest = var.helm_manifest_experiment_enabled && module.this.enabled + } +} + +provider "kubernetes" { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } +} diff --git a/modules/eks/argocd/provider-secrets.tf b/modules/eks/argocd/provider-secrets.tf new file mode 100644 index 000000000..277d04a10 --- /dev/null +++ b/modules/eks/argocd/provider-secrets.tf @@ -0,0 +1,22 @@ +provider "aws" { + alias = "config_secrets" + region = var.ssm_store_account_region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles_config_secrets.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles_config_secrets.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles_config_secrets" { + source = "../../account-map/modules/iam-roles" + stage = var.ssm_store_account + tenant = var.ssm_store_account_tenant + context = module.this.context +} diff --git a/modules/eks/argocd/providers.tf b/modules/eks/argocd/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/eks/argocd/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. 
When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/eks/argocd/remote-state.tf b/modules/eks/argocd/remote-state.tf new file mode 100644 index 000000000..c63f3572f --- /dev/null +++ b/modules/eks/argocd/remote-state.tf @@ -0,0 +1,43 @@ +module "eks" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.eks_component_name + + context = module.this.context +} + +module "dns_gbl_delegated" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + environment = "gbl" + component = "dns-delegated" + + context = module.this.context +} + +module "saml_sso_providers" { + for_each = local.enabled ? var.saml_sso_providers : {} + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = each.value.component + environment = each.value.environment + + context = module.this.context +} + +module "argocd_repo" { + for_each = local.enabled ? var.argocd_repositories : {} + + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = each.key + environment = each.value.environment + stage = each.value.stage + tenant = each.value.tenant + + context = module.this.context +} diff --git a/modules/eks/argocd/resources/argo-horizontal-color.png b/modules/eks/argocd/resources/argo-horizontal-color.png new file mode 100644 index 000000000..1154626e3 Binary files /dev/null and b/modules/eks/argocd/resources/argo-horizontal-color.png differ diff --git a/modules/eks/argocd/resources/argocd-apps-values.yaml.tpl b/modules/eks/argocd/resources/argocd-apps-values.yaml.tpl new file mode 100644 index 000000000..afef8d8bc --- /dev/null +++ b/modules/eks/argocd/resources/argocd-apps-values.yaml.tpl @@ -0,0 +1,23 @@ +applications: +%{ for name, url in application_repos ~} +- name: ${name} + namespace: ${namespace} + additionalLabels: {} + additionalAnnotations: {} + project: default + source: + repoURL: ${url} + targetRevision: HEAD + path: ./%{ if tenant != null }${tenant}/%{ endif }${environment}-${stage}%{ for attr in attributes }-${attr}%{ endfor }/${namespace} + directory: + recurse: false + destination: + server: https://kubernetes.default.svc + namespace: ${namespace} + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=${create_namespaces} +%{ endfor ~} diff --git a/modules/eks/argocd/resources/argocd-notifications-values.yaml.tpl b/modules/eks/argocd/resources/argocd-notifications-values.yaml.tpl new file mode 100644 index 000000000..e8b4bab5e --- /dev/null +++ b/modules/eks/argocd/resources/argocd-notifications-values.yaml.tpl @@ -0,0 +1,9 @@ +notifications: + secret: + # create: false # Do not create an argocd-notifications-secret β€” this secret should instead be created via sops-secrets-operator + create: true + + argocdUrl: ${argocd_host} + podAnnotations: + checksum/config: ${configs-hash} + checksum/secrets: ${secrets-hash} diff --git a/modules/eks/argocd/resources/argocd-values.yaml.tpl b/modules/eks/argocd/resources/argocd-values.yaml.tpl new file mode 100644 index 000000000..6c12148d2 --- 
/dev/null +++ b/modules/eks/argocd/resources/argocd-values.yaml.tpl @@ -0,0 +1,136 @@ +global: + image: + imagePullPolicy: IfNotPresent + +crds: + install: true + +dex: + image: + imagePullPolicy: IfNotPresent + tag: v2.30.2 + +controller: + replicas: 1 + +server: + replicas: 2 + + ingress: + enabled: true + annotations: + cert-manager.io/cluster-issuer: ${cert_issuer} + external-dns.alpha.kubernetes.io/hostname: ${ingress_host} + external-dns.alpha.kubernetes.io/ttl: "60" + kubernetes.io/ingress.class: alb +%{ if alb_group_name != "" ~} + alb.ingress.kubernetes.io/group.name: ${alb_group_name} +%{ endif ~} +%{ if alb_name != "" ~} + alb.ingress.kubernetes.io/load-balancer-name: ${alb_name} +%{ endif ~} + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/backend-protocol: HTTPS + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80},{"HTTPS":443}]' + alb.ingress.kubernetes.io/ssl-redirect: '443' + alb.ingress.kubernetes.io/load-balancer-attributes: + routing.http.drop_invalid_header_fields.enabled=true, +%{ if alb_logs_bucket != "" ~} + access_logs.s3.enabled=true, + access_logs.s3.bucket=${alb_logs_bucket}, + access_logs.s3.prefix=${alb_logs_prefix} +%{ endif ~} +%{ if forecastle_enabled == true ~} + forecastle.stakater.com/appName: "ArgoCD" + forecastle.stakater.com/expose: "true" + forecastle.stakater.com/group: "portal" + forecastle.stakater.com/icon: https://argoproj.github.io/argo-cd/assets/logo.png + forecastle.stakater.com/instance: default +%{ endif ~} + hosts: + - ${argocd_host} + extraPaths: + # Must use implementation specific wildcard paths + # https://github.com/kubernetes-sigs/aws-load-balancer-controller/issues/1702#issuecomment-736890777 + - path: /* + pathType: ImplementationSpecific + backend: + service: + name: ${name}-server + port: + name: https + tls: + - hosts: + - ${argocd_host} + secretName: argocd-tls + https: false + + service: + type: ${service_type} + + secret: + create: true + + config: + url: https://${argocd_host} + admin.enabled: "${admin_enabled}" + users.anonymous_enabled: "${anonymous_enabled}" + + # https://github.com/argoproj/argo-cd/issues/7835 + kustomize.buildOptions: --enable-helm + +# overridden in main.tf +# oidc.conf : ~ +# dex.config: ~ + + repositories: | +%{ for name, url in application_repos ~} + - url: ${url} + sshPrivateKeySecret: + name: argocd-repo-creds-${name} + key: sshPrivateKey +%{ endfor ~} + resource.customizations: | + admissionregistration.k8s.io/MutatingWebhookConfiguration: + ignoreDifferences: | + jsonPointers: + - /webhooks/0/clientConfig/caBundle + argoproj.io/Application: + health.lua: | + hs = {} + hs.status = "Progressing" + hs.message = "" + if obj.status ~= nil then + if obj.status.health ~= nil then + hs.status = obj.status.health.status + if obj.status.health.message ~= nil then + hs.message = obj.status.health.message + end + end + end + return hs + + rbacConfig: + policy.default: ${rbac_default_policy} + policy.csv: | +%{ for policy in rbac_policies ~} + ${policy} +%{ endfor ~} +%{for item in rbac_groups ~} + g, ${item.group}, role:${item.role} +%{ endfor ~} + +%{ if oidc_enabled == true ~} + scopes: '${oidc_rbac_scopes}' +%{ endif ~} +%{ if saml_enabled == true ~} + scopes: '${saml_rbac_scopes}' +%{ endif ~} + + policy.default: role:readonly + +repoServer: + replicas: 2 + +applicationSet: + replicas: 2 diff --git a/modules/eks/argocd/variables-argocd-apps.tf b/modules/eks/argocd/variables-argocd-apps.tf new file mode 100644 index 000000000..7ecab3742 --- /dev/null +++ 
b/modules/eks/argocd/variables-argocd-apps.tf @@ -0,0 +1,35 @@ +variable "argocd_apps_chart_description" { + type = string + description = "Set release description attribute (visible in the history)." + default = "A Helm chart for managing additional Argo CD Applications and Projects" +} + +variable "argocd_apps_chart" { + type = string + description = "Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended." + default = "argocd-apps" +} + +variable "argocd_apps_chart_repository" { + type = string + description = "Repository URL where to locate the requested chart." + default = "https://argoproj.github.io/argo-helm" +} + +variable "argocd_apps_chart_version" { + type = string + description = "Specify the exact chart version to install. If this is not specified, the latest version is installed." + default = "0.0.3" +} + +variable "argocd_apps_enabled" { + type = bool + description = "Enable argocd apps" + default = true +} + +variable "argocd_apps_chart_values" { + type = any + description = "Additional values to yamlencode as `helm_release` values for the argocd_apps chart" + default = {} +} diff --git a/modules/eks/argocd/variables-argocd-notifications.tf b/modules/eks/argocd/variables-argocd-notifications.tf new file mode 100644 index 000000000..4a8bce937 --- /dev/null +++ b/modules/eks/argocd/variables-argocd-notifications.tf @@ -0,0 +1,99 @@ +variable "github_default_notifications_enabled" { + type = bool + default = true + description = "Enable default GitHub commit statuses notifications (required for CD sync mode)" +} + +variable "notifications_templates" { + description = <<-EOT + Notification Templates to configure. + + See: https://argocd-notifications.readthedocs.io/en/stable/templates/ + See: [Example value in argocd-notifications Helm Chart](https://github.com/argoproj/argo-helm/blob/a0a74fb43d147073e41aadc3d88660b312d6d638/charts/argocd-notifications/values.yaml#L158) + EOT + + type = map(object({ + message = string + alertmanager = optional(object({ + labels = map(string) + annotations = map(string) + generatorURL = string + })) + webhook = optional(map( + object({ + method = optional(string) + path = optional(string) + body = optional(string) + }) + )) + })) + + default = {} +} + +variable "notifications_triggers" { + description = <<-EOT + Notification Triggers to configure. + + See: https://argocd-notifications.readthedocs.io/en/stable/triggers/ + See: [Example value in argocd-notifications Helm Chart](https://github.com/argoproj/argo-helm/blob/a0a74fb43d147073e41aadc3d88660b312d6d638/charts/argocd-notifications/values.yaml#L352) + EOT + + type = map(list( + object({ + oncePer = optional(string) + send = list(string) + when = string + }) + )) + + default = {} +} + +variable "notifications_notifiers" { + type = object({ + ssm_path_prefix = optional(string, "/argocd/notifications/notifiers") + # service.webhook.: + webhook = optional(map( + object({ + url = string + headers = optional(list( + object({ + name = string + value = string + }) + ), []) + insecureSkipVerify = optional(bool, false) + }) + )) + }) + description = <<-EOT + Notification Triggers to configure. 
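+    For example (an illustrative value only), a `webhook` notifier entry could look like
+    `{ webhook = { github = { url = "https://api.github.com" } } }`, matching the `webhook` object type declared above.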
+ + See: https://argocd-notifications.readthedocs.io/en/stable/triggers/ + See: [Example value in argocd-notifications Helm Chart](https://github.com/argoproj/argo-helm/blob/a0a74fb43d147073e41aadc3d88660b312d6d638/charts/argocd-notifications/values.yaml#L352) + EOT + default = {} +} + +variable "slack_notifications_enabled" { + type = bool + default = false + description = "Whether or not to enable Slack notifications. See `var.slack_notifications." +} + +variable "slack_notifications" { + type = object({ + token_ssm_path = optional(string, "/argocd/notifications/notifiers/slack/token") + api_url = optional(string, null) + username = optional(string, "ArgoCD") + icon = optional(string, null) + }) + description = <<-EOT + ArgoCD Slack notification configuration. Requires Slack Bot created with token stored at the given SSM Parameter path. + + See: https://argocd-notifications.readthedocs.io/en/stable/services/slack/ + EOT + + default = {} +} diff --git a/modules/eks/argocd/variables-argocd.tf b/modules/eks/argocd/variables-argocd.tf new file mode 100644 index 000000000..ca4d54496 --- /dev/null +++ b/modules/eks/argocd/variables-argocd.tf @@ -0,0 +1,217 @@ +// ArgoCD variables + +variable "alb_group_name" { + type = string + description = "A name used in annotations to reuse an ALB (e.g. `argocd`) or to generate a new one" + default = null +} + +variable "alb_name" { + type = string + description = "The name of the ALB (e.g. `argocd`) provisioned by `alb-controller`. Works together with `var.alb_group_name`" + default = null +} + +variable "alb_logs_bucket" { + type = string + description = "The name of the bucket for ALB access logs. The bucket must have policy allowing the ELB logging principal" + default = "" +} + +variable "alb_logs_prefix" { + type = string + description = "`alb_logs_bucket` s3 bucket prefix" + default = "" +} + +variable "certificate_issuer" { + type = string + description = "Certificate manager cluster issuer" + default = "letsencrypt-staging" +} + +variable "argocd_create_namespaces" { + type = bool + description = "ArgoCD create namespaces policy" + default = false +} + +variable "argocd_repositories" { + type = map(object({ + environment = string # The environment where the `argocd_repo` component is deployed. + stage = string # The stage where the `argocd_repo` component is deployed. + tenant = string # The tenant where the `argocd_repo` component is deployed. + })) + description = "Map of objects defining an `argocd_repo` to configure. The key is the name of the ArgoCD repository." + default = {} +} + +variable "github_organization" { + type = string + description = "GitHub Organization" +} + +variable "ssm_store_account" { + type = string + description = "Account storing SSM parameters" +} + +variable "ssm_store_account_tenant" { + type = string + description = <<-EOT + Tenant of the account storing SSM parameters. + + If the tenant label is not used, leave this as null. 
+ EOT + default = null +} + +variable "ssm_store_account_region" { + type = string + description = "AWS region storing SSM parameters" +} + +variable "ssm_oidc_client_id" { + type = string + description = "The SSM Parameter Store path for the ID of the IdP client" + default = "/argocd/oidc/client_id" +} + +variable "ssm_oidc_client_secret" { + type = string + description = "The SSM Parameter Store path for the secret of the IdP client" + default = "/argocd/oidc/client_secret" +} + +variable "host" { + type = string + description = "Host name to use for ingress and ALB" + default = "" +} + +variable "forecastle_enabled" { + type = bool + description = "Toggles Forecastle integration in the deployed chart" + default = false +} + +variable "admin_enabled" { + type = bool + description = "Toggles Admin user creation the deployed chart" + default = false +} + +variable "anonymous_enabled" { + type = bool + description = "Toggles anonymous user access using default RBAC setting (Defaults to read-only)" + default = false +} + +variable "oidc_enabled" { + type = bool + description = "Toggles OIDC integration in the deployed chart" + default = false +} + +variable "oidc_issuer" { + type = string + description = "OIDC issuer URL" + default = "" +} + +variable "oidc_name" { + type = string + description = "Name of the OIDC resource" + default = "" +} + +variable "oidc_rbac_scopes" { + type = string + description = "OIDC RBAC scopes to request" + default = "[argocd_realm_access]" +} + +variable "oidc_requested_scopes" { + type = string + description = "Set of OIDC scopes to request" + default = "[\"openid\", \"profile\", \"email\", \"groups\"]" +} + +variable "saml_enabled" { + type = bool + description = "Toggles SAML integration in the deployed chart" + default = false +} + +variable "saml_rbac_scopes" { + type = string + description = "SAML RBAC scopes to request" + default = "[email,groups]" +} + +variable "service_type" { + type = string + default = "NodePort" + description = <<-EOT + Service type for exposing the ArgoCD service. The available type values and their behaviors are: + ClusterIP: Exposes the Service on a cluster-internal IP. Choosing this value makes the Service only reachable from within the cluster. + NodePort: Exposes the Service on each Node's IP at a static port (the NodePort). + LoadBalancer: Exposes the Service externally using a cloud provider's load balancer. + EOT +} + +variable "argocd_rbac_policies" { + type = list(string) + default = [] + description = <<-EOT + List of ArgoCD RBAC Permission strings to be added to the argocd-rbac configmap policy.csv item. + + See https://argo-cd.readthedocs.io/en/stable/operator-manual/rbac/ for more information. + EOT +} + +variable "argocd_rbac_default_policy" { + type = string + default = "role:readonly" + description = <<-EOT + Default ArgoCD RBAC default role. + + See https://argo-cd.readthedocs.io/en/stable/operator-manual/rbac/#basic-built-in-roles for more information. + EOT +} + +variable "argocd_rbac_groups" { + type = list(object({ + group = string, + role = string + })) + default = [] + description = <<-EOT + List of ArgoCD Group Role Assignment strings to be added to the argocd-rbac configmap policy.csv item. + e.g. + [ + { + group: idp-group-name, + role: argocd-role-name + }, + ] + becomes: `g, idp-group-name, role:argocd-role-name` + See https://argo-cd.readthedocs.io/en/stable/operator-manual/rbac/ for more information. 
+ EOT +} + +variable "eks_component_name" { + type = string + default = "eks/cluster" + description = "The name of the eks component" +} + +variable "saml_sso_providers" { + type = map(object({ + component = string + environment = optional(string, null) + })) + + default = {} + description = "SAML SSO providers components" +} diff --git a/modules/eks/argocd/variables-helm.tf b/modules/eks/argocd/variables-helm.tf new file mode 100644 index 000000000..af8411c3e --- /dev/null +++ b/modules/eks/argocd/variables-helm.tf @@ -0,0 +1,93 @@ +// Standard Helm Chart variables + +variable "region" { + description = "AWS Region." + type = string +} + +variable "chart_description" { + type = string + description = "Set release description attribute (visible in the history)." + default = null +} + +variable "chart" { + type = string + description = "Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended." + default = "argo-cd" +} + +variable "chart_repository" { + type = string + description = "Repository URL where to locate the requested chart." + default = "https://argoproj.github.io/argo-helm" +} + +variable "chart_version" { + type = string + description = "Specify the exact chart version to install. If this is not specified, the latest version is installed." + default = "5.55.0" +} + +variable "resources" { + type = object({ + limits = object({ + cpu = string + memory = string + }) + requests = object({ + cpu = string + memory = string + }) + }) + default = null + description = "The cpu and memory of the deployment's limits and requests." +} + +variable "create_namespace" { + type = bool + description = "Create the namespace if it does not yet exist. Defaults to `false`." + default = false +} + +variable "kubernetes_namespace" { + type = string + description = "The namespace to install the release into." + default = "argocd" +} + +variable "timeout" { + type = number + description = "Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds" + default = 300 +} + +variable "cleanup_on_fail" { + type = bool + description = "Allow deletion of new resources created in this upgrade when upgrade fails." + default = true +} + +variable "atomic" { + type = bool + description = "If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used." + default = true +} + +variable "wait" { + type = bool + description = "Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`." + default = true +} + +variable "chart_values" { + type = any + description = "Additional values to yamlencode as `helm_release` values." + default = {} +} + +variable "rbac_enabled" { + type = bool + default = true + description = "Enable Service Account for pods." 
+} diff --git a/modules/eks/argocd/versions.tf b/modules/eks/argocd/versions.tf new file mode 100644 index 000000000..0877dc2d0 --- /dev/null +++ b/modules/eks/argocd/versions.tf @@ -0,0 +1,26 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.6.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.9.0, != 2.21.0" + } + github = { + source = "integrations/github" + version = ">= 4.0" + } + random = { + source = "hashicorp/random" + version = ">= 3.5" + } + } +} diff --git a/modules/eks/aws-node-termination-handler/README.md b/modules/eks/aws-node-termination-handler/README.md index 757cd43a1..d6505fb97 100644 --- a/modules/eks/aws-node-termination-handler/README.md +++ b/modules/eks/aws-node-termination-handler/README.md @@ -1,7 +1,19 @@ -# Component: `aws-node-termination-handler` - -This component creates a Helm release for [aws-node-termination-handler](https://github.com/aws/aws-node-termination-handler) on a Kubernetes cluster. [aws-node-termination-handler](https://github.com/aws/aws-node-termination-handler) is a Kubernetes addon that (by default) monitors the EC2 IMDS endpoint for scheduled maintenance events, spot instance termination events, and rebalance recommendation events, and drains and/or cordons nodes upon such events. -This ensures that workloads on Kubernetes are evicted gracefully when a node needs to be terminated. +--- +tags: + - component/eks/aws-node-termination-handler + - layer/eks + - provider/aws + - provider/helm +--- + +# Component: `eks/aws-node-termination-handler` + +This component creates a Helm release for +[aws-node-termination-handler](https://github.com/aws/aws-node-termination-handler) on a Kubernetes cluster. +[aws-node-termination-handler](https://github.com/aws/aws-node-termination-handler) is a Kubernetes addon that (by +default) monitors the EC2 IMDS endpoint for scheduled maintenance events, spot instance termination events, and +rebalance recommendation events, and drains and/or cordons nodes upon such events. This ensures that workloads on +Kubernetes are evicted gracefully when a node needs to be terminated. 
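+
+As a quick illustration, node-draining behavior can be tuned through this component's `chart_values` input (shown
+empty in the usage example below). The value names here come from the upstream `aws-node-termination-handler` chart
+and may differ between chart versions, so treat this as a sketch rather than a definitive configuration:
+
+```yaml
+chart_values:
+  enableSpotInterruptionDraining: true
+  enableScheduledEventDraining: true
+  enableRebalanceMonitoring: true
+```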
## Usage @@ -38,28 +50,30 @@ components: chart_values: {} ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | | [helm](#requirement\_helm) | >= 2.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0, != 2.21.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | -| [kubernetes](#provider\_kubernetes) | n/a | +| [aws](#provider\_aws) | >= 4.0 | +| [kubernetes](#provider\_kubernetes) | >= 2.0, != 2.21.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_node\_termination\_handler](#module\_aws\_node\_termination\_handler) | cloudposse/helm-release/aws | 0.5.0 | -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [aws\_node\_termination\_handler](#module\_aws\_node\_termination\_handler) | cloudposse/helm-release/aws | 0.10.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -77,11 +91,11 @@ components: | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | | [atomic](#input\_atomic) | If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used. | `bool` | `true` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | -| [chart](#input\_chart) | Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended. | `string` | n/a | yes | +| [chart](#input\_chart) | Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended. | `string` | `"aws-node-termination-handler"` | no | | [chart\_description](#input\_chart\_description) | Set release description attribute (visible in the history). | `string` | `null` | no | -| [chart\_repository](#input\_chart\_repository) | Repository URL where to locate the requested chart. | `string` | n/a | yes | +| [chart\_repository](#input\_chart\_repository) | Repository URL where to locate the requested chart. | `string` | `"https://aws.github.io/eks-charts"` | no | | [chart\_values](#input\_chart\_values) | Additional values to yamlencode as `helm_release` values. | `any` | `{}` | no | -| [chart\_version](#input\_chart\_version) | Specify the exact chart version to install. If this is not specified, the latest version is installed. | `string` | `null` | no | +| [chart\_version](#input\_chart\_version) | Specify the exact chart version to install. If this is not specified, the latest version is installed. | `string` | `"0.15.3"` | no | | [cleanup\_on\_fail](#input\_cleanup\_on\_fail) | Allow deletion of new resources created in this upgrade when upgrade fails. | `bool` | `true` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [create\_namespace](#input\_create\_namespace) | Create the namespace if it does not yet exist. Defaults to `false`. | `bool` | `null` | no | @@ -90,17 +104,16 @@ components: | [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `true` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | | [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | | [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | | [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | -| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | | [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | | [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | | [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | @@ -114,7 +127,7 @@ components: | [rbac\_enabled](#input\_rbac\_enabled) | Service Account for pods. | `bool` | `true` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region. | `string` | n/a | yes | -| [resources](#input\_resources) | The cpu and memory of the deployment's limits and requests. |
object({
limits = object({
cpu = string
memory = string
})
requests = object({
cpu = string
memory = string
})
})
| n/a | yes | +| [resources](#input\_resources) | The cpu and memory of the deployment's limits and requests. |
object({
limits = object({
cpu = string
memory = string
})
requests = object({
cpu = string
memory = string
})
})
|
{
"limits": {
"cpu": "100m",
"memory": "128Mi"
},
"requests": {
"cpu": "50m",
"memory": "64Mi"
}
}
| no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | @@ -127,6 +140,7 @@ components: |------|-------------| | [metadata](#output\_metadata) | Block status of the deployed release | + ## References diff --git a/modules/eks/aws-node-termination-handler/default.auto.tfvars b/modules/eks/aws-node-termination-handler/default.auto.tfvars deleted file mode 100644 index 79fa04cf0..000000000 --- a/modules/eks/aws-node-termination-handler/default.auto.tfvars +++ /dev/null @@ -1,23 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false - -name = "aws-node-termination-handler" - -chart = "aws-node-termination-handler" -chart_repository = "https://aws.github.io/eks-charts" -chart_version = "0.15.3" - -create_namespace = true -kubernetes_namespace = "aws-node-termination-handler" - -resources = { - limits = { - cpu = "100m" - memory = "128Mi" - }, - requests = { - cpu = "50m" - memory = "64Mi" - } -} diff --git a/modules/eks/aws-node-termination-handler/main.tf b/modules/eks/aws-node-termination-handler/main.tf index e4e2c1561..55b16d4ae 100644 --- a/modules/eks/aws-node-termination-handler/main.tf +++ b/modules/eks/aws-node-termination-handler/main.tf @@ -14,7 +14,7 @@ resource "kubernetes_namespace" "default" { module "aws_node_termination_handler" { source = "cloudposse/helm-release/aws" - version = "0.5.0" + version = "0.10.0" name = "" # avoids hitting length restrictions on IAM Role names chart = var.chart diff --git a/modules/eks/aws-node-termination-handler/provider-helm.tf b/modules/eks/aws-node-termination-handler/provider-helm.tf index 20e4d3837..91cc7f6d4 100644 --- a/modules/eks/aws-node-termination-handler/provider-helm.tf +++ b/modules/eks/aws-node-termination-handler/provider-helm.tf @@ -2,6 +2,12 @@ # # This file is a drop-in to provide a helm provider. # +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# # All the following variables are just about configuring the Kubernetes provider # to be able to modify EKS cluster. The reason there are so many options is # because at various times, each one of them has had problems, so we give you a choice. @@ -15,18 +21,35 @@ variable "kubeconfig_file_enabled" { type = bool default = false description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false } variable "kubeconfig_file" { type = string default = "" description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false } variable "kubeconfig_context" { type = string default = "" - description = "Context to choose from the Kubernetes kube config file" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. 
+ Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false } variable "kube_data_auth_enabled" { @@ -36,6 +59,7 @@ variable "kube_data_auth_enabled" { If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_enabled" { @@ -45,48 +69,62 @@ variable "kube_exec_auth_enabled" { If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_role_arn" { type = string default = "" description = "The role ARN for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_role_arn_enabled" { type = bool default = true description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false } variable "kube_exec_auth_aws_profile" { type = string default = "" description = "The AWS config profile for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_aws_profile_enabled" { type = bool default = false description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false } variable "kubeconfig_exec_auth_api_version" { type = string default = "client.authentication.k8s.io/v1beta1" description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false } variable "helm_manifest_experiment_enabled" { type = bool - default = true + default = false description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false } locals { kubeconfig_file_enabled = var.kubeconfig_file_enabled - kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled - kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled # Eventually we might try to get this from an environment variable kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version @@ -95,14 +133,17 @@ locals { "--profile", var.kube_exec_auth_aws_profile ] : [] - kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, var.import_role_arn, module.iam_roles.terraform_role_arn) + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ "--role-arn", local.kube_exec_auth_role_arn ] : [] - certificate_authority_data = module.eks.outputs.eks_cluster_certificate_authority_data - eks_cluster_id = module.eks.outputs.eks_cluster_id - eks_cluster_endpoint = module.eks.outputs.eks_cluster_endpoint + # Provide dummy configuration for the case where the EKS cluster is not available. 
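+  # (Note: when `kubeconfig_file_enabled` is true, the cluster endpoint and CA below are left null on purpose, so the
+  # provider blocks fall back to the kubeconfig file via `config_path`/`config_context`.)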
+ certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") } data "aws_eks_cluster_auth" "eks" { @@ -113,15 +154,16 @@ data "aws_eks_cluster_auth" "eks" { provider "helm" { kubernetes { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) - token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" @@ -132,21 +174,22 @@ provider "helm" { } } experiments { - manifest = var.helm_manifest_experiment_enabled + manifest = var.helm_manifest_experiment_enabled && module.this.enabled } } provider "kubernetes" { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) - token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? 
["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" diff --git a/modules/eks/aws-node-termination-handler/providers.tf b/modules/eks/aws-node-termination-handler/providers.tf index 74ff8e62c..89ed50a98 100644 --- a/modules/eks/aws-node-termination-handler/providers.tf +++ b/modules/eks/aws-node-termination-handler/providers.tf @@ -1,11 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -14,15 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/eks/aws-node-termination-handler/remote-state.tf b/modules/eks/aws-node-termination-handler/remote-state.tf index 6ef90fd26..c1ec8226d 100644 --- a/modules/eks/aws-node-termination-handler/remote-state.tf +++ b/modules/eks/aws-node-termination-handler/remote-state.tf @@ -1,6 +1,6 @@ module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = var.eks_component_name diff --git a/modules/eks/aws-node-termination-handler/variables.tf b/modules/eks/aws-node-termination-handler/variables.tf index 29173515e..2faebfaf0 100644 --- a/modules/eks/aws-node-termination-handler/variables.tf +++ b/modules/eks/aws-node-termination-handler/variables.tf @@ -12,17 +12,19 @@ variable "chart_description" { variable "chart" { type = string description = "Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended." + default = "aws-node-termination-handler" } variable "chart_repository" { type = string description = "Repository URL where to locate the requested chart." + default = "https://aws.github.io/eks-charts" } variable "chart_version" { type = string description = "Specify the exact chart version to install. If this is not specified, the latest version is installed." - default = null + default = "0.15.3" } variable "resources" { @@ -37,6 +39,16 @@ variable "resources" { }) }) description = "The cpu and memory of the deployment's limits and requests." 
+ default = { + limits = { + cpu = "100m" + memory = "128Mi" + } + requests = { + cpu = "50m" + memory = "64Mi" + } + } } variable "create_namespace" { diff --git a/modules/eks/aws-node-termination-handler/versions.tf b/modules/eks/aws-node-termination-handler/versions.tf index 58318d20e..14c085342 100644 --- a/modules/eks/aws-node-termination-handler/versions.tf +++ b/modules/eks/aws-node-termination-handler/versions.tf @@ -4,11 +4,15 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } helm = { source = "hashicorp/helm" version = ">= 2.0" } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0, != 2.21.0" + } } } diff --git a/modules/eks/cert-manager/README.md b/modules/eks/cert-manager/README.md index 6fb90563a..6d1bf2f87 100644 --- a/modules/eks/cert-manager/README.md +++ b/modules/eks/cert-manager/README.md @@ -1,6 +1,16 @@ +--- +tags: + - component/eks/cert-manager + - layer/eks + - provider/aws + - provider/helm +--- + # Component: `eks/cert-manager` -This component creates a Helm release for [cert-manager](https://github.com/jetstack/cert-manager) on a Kubernetes cluster. [cert-manager](https://github.com/jetstack/cert-manager) is a Kubernetes addon that provisions X.509 certificates. +This component creates a Helm release for [cert-manager](https://github.com/jetstack/cert-manager) on a Kubernetes +cluster. [cert-manager](https://github.com/jetstack/cert-manager) is a Kubernetes addon that provisions X.509 +certificates. ## Usage @@ -17,38 +27,39 @@ import: The default catalog values `e.g. stacks/catalog/eks/cert-manager.yaml` ```yaml - enabled: true - name: cert-manager - kubernetes_namespace: cert-manager - # `helm_manifest_experiment_enabled` does not work with cert-manager or any Helm chart that uses CRDs - helm_manifest_experiment_enabled: false - # Use the cert-manager as a private CA (Certificate Authority) - # to issue certificates for use within the Kubernetes cluster. - # Something like this is required for the ALB Ingress Controller. - cert_manager_issuer_selfsigned_enabled: true - # Use Let's Encrypt to issue certificates for use outside the Kubernetes cluster, - # ones that will be trusted by browsers. - # These do not (yet) work with the ALB Ingress Controller, - # which require ACM certificates, so we have no use for them. - letsencrypt_enabled: true - # cert_manager_issuer_support_email_template is only used if letsencrypt_enabled is true. - # If it were true, we would want to set it at the organization level. - cert_manager_issuer_support_email_template: "aws+%s@acme.com" - cert_manager_repository: https://charts.jetstack.io - cert_manager_chart: cert-manager - cert_manager_chart_version: v1.5.4 - - # use a local chart to provision Certificate Issuers - cert_manager_issuer_chart: ./cert-manager-issuer/ - cert_manager_resources: - limits: - cpu: 200m - memory: 256Mi - requests: - cpu: 100m - memory: 128Mi +enabled: true +name: cert-manager +kubernetes_namespace: cert-manager +# `helm_manifest_experiment_enabled` does not work with cert-manager or any Helm chart that uses CRDs +helm_manifest_experiment_enabled: false +# Use the cert-manager as a private CA (Certificate Authority) +# to issue certificates for use within the Kubernetes cluster. +# Something like this is required for the ALB Ingress Controller. +cert_manager_issuer_selfsigned_enabled: true +# Use Let's Encrypt to issue certificates for use outside the Kubernetes cluster, +# ones that will be trusted by browsers. 
+# These do not (yet) work with the ALB Ingress Controller, +# which require ACM certificates, so we have no use for them. +letsencrypt_enabled: true +# cert_manager_issuer_support_email_template is only used if letsencrypt_enabled is true. +# If it were true, we would want to set it at the organization level. +cert_manager_issuer_support_email_template: "aws+%s@acme.com" +cert_manager_repository: https://charts.jetstack.io +cert_manager_chart: cert-manager +cert_manager_chart_version: v1.5.4 + +# use a local chart to provision Certificate Issuers +cert_manager_issuer_chart: ./cert-manager-issuer/ +cert_manager_resources: + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 100m + memory: 128Mi ``` + ## Requirements @@ -57,7 +68,7 @@ The default catalog values `e.g. stacks/catalog/eks/cert-manager.yaml` | [terraform](#requirement\_terraform) | >= 1.0.0 | | [aws](#requirement\_aws) | >= 4.9.0 | | [helm](#requirement\_helm) | >= 2.0 | -| [kubernetes](#requirement\_kubernetes) | >= 2.14.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.14.0, != 2.21.0 | ## Providers @@ -69,10 +80,10 @@ The default catalog values `e.g. stacks/catalog/eks/cert-manager.yaml` | Name | Source | Version | |------|--------|---------| -| [cert\_manager](#module\_cert\_manager) | cloudposse/helm-release/aws | 0.7.0 | -| [cert\_manager\_issuer](#module\_cert\_manager\_issuer) | cloudposse/helm-release/aws | 0.7.0 | -| [dns\_gbl\_delegated](#module\_dns\_gbl\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.3.1 | -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.3.1 | +| [cert\_manager](#module\_cert\_manager) | cloudposse/helm-release/aws | 0.10.0 | +| [cert\_manager\_issuer](#module\_cert\_manager\_issuer) | cloudposse/helm-release/aws | 0.10.0 | +| [dns\_gbl\_delegated](#module\_dns\_gbl\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -113,17 +124,16 @@ The default catalog values `e.g. stacks/catalog/eks/cert-manager.yaml` | [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `true` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | | [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | | [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | | [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | -| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | | [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | | [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | | [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | @@ -150,9 +160,10 @@ The default catalog values `e.g. stacks/catalog/eks/cert-manager.yaml` | [cert\_manager\_issuer\_metadata](#output\_cert\_manager\_issuer\_metadata) | Block status of the deployed release | | [cert\_manager\_metadata](#output\_cert\_manager\_metadata) | Block status of the deployed release | + ## References -* [cert-manager](https://github.com/jetstack/cert-manager) +- [cert-manager](https://github.com/jetstack/cert-manager) [](https://cpco.io/component) diff --git a/modules/eks/cert-manager/main.tf b/modules/eks/cert-manager/main.tf index f87e21c49..8cfad310f 100644 --- a/modules/eks/cert-manager/main.tf +++ b/modules/eks/cert-manager/main.tf @@ -9,7 +9,7 @@ data "aws_partition" "current" { module "cert_manager" { source = "cloudposse/helm-release/aws" - version = "0.7.0" + version = "0.10.0" name = "" # avoids hitting length restrictions on IAM Role names chart = var.cert_manager_chart @@ -108,7 +108,7 @@ module "cert_manager" { module "cert_manager_issuer" { source = "cloudposse/helm-release/aws" - version = "0.7.0" + version = "0.10.0" # Only install the issuer if either letsencrypt_installed or selfsigned_installed is true enabled = local.enabled && (var.letsencrypt_enabled || var.cert_manager_issuer_selfsigned_enabled) diff --git a/modules/eks/cert-manager/outputs.tf b/modules/eks/cert-manager/outputs.tf index 88830b0be..d9ef29c9a 100644 --- a/modules/eks/cert-manager/outputs.tf +++ b/modules/eks/cert-manager/outputs.tf @@ -7,4 +7,3 @@ output "cert_manager_issuer_metadata" { value = try(one(module.cert_manager_issuer.metadata), null) description = "Block status of the deployed release" } - diff --git a/modules/eks/cert-manager/provider-helm.tf b/modules/eks/cert-manager/provider-helm.tf index 9bb5edb6f..91cc7f6d4 100644 --- a/modules/eks/cert-manager/provider-helm.tf +++ b/modules/eks/cert-manager/provider-helm.tf @@ -21,18 +21,35 @@ variable "kubeconfig_file_enabled" { type = bool default = false description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false } variable "kubeconfig_file" { type = string default = "" description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false } variable "kubeconfig_context" { type = string default = "" - description = "Context to choose from the Kubernetes kube config file" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. 
+ EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false } variable "kube_data_auth_enabled" { @@ -42,6 +59,7 @@ variable "kube_data_auth_enabled" { If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_enabled" { @@ -51,48 +69,62 @@ variable "kube_exec_auth_enabled" { If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_role_arn" { type = string default = "" description = "The role ARN for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_role_arn_enabled" { type = bool default = true description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false } variable "kube_exec_auth_aws_profile" { type = string default = "" description = "The AWS config profile for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_aws_profile_enabled" { type = bool default = false description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false } variable "kubeconfig_exec_auth_api_version" { type = string default = "client.authentication.k8s.io/v1beta1" description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false } variable "helm_manifest_experiment_enabled" { type = bool - default = true + default = false description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false } locals { kubeconfig_file_enabled = var.kubeconfig_file_enabled - kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled - kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled # Eventually we might try to get this from an environment variable kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version @@ -101,16 +133,17 @@ locals { "--profile", var.kube_exec_auth_aws_profile ] : [] - kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, var.import_role_arn, module.iam_roles.terraform_role_arn) + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? 
[ "--role-arn", local.kube_exec_auth_role_arn ] : [] # Provide dummy configuration for the case where the EKS cluster is not available. - certificate_authority_data = try(module.eks.outputs.eks_cluster_certificate_authority_data, "") + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") - eks_cluster_endpoint = try(module.eks.outputs.eks_cluster_endpoint, "") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") } data "aws_eks_cluster_auth" "eks" { @@ -121,15 +154,16 @@ data "aws_eks_cluster_auth" "eks" { provider "helm" { kubernetes { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) + cluster_ca_certificate = local.cluster_ca_certificate token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" @@ -146,15 +180,16 @@ provider "helm" { provider "kubernetes" { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) + cluster_ca_certificate = local.cluster_ca_certificate token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? 
["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" diff --git a/modules/eks/cert-manager/providers.tf b/modules/eks/cert-manager/providers.tf index c2419aabb..89ed50a98 100644 --- a/modules/eks/cert-manager/providers.tf +++ b/modules/eks/cert-manager/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/eks/cert-manager/remote-state.tf b/modules/eks/cert-manager/remote-state.tf index 1901569b7..1e6842bfa 100644 --- a/modules/eks/cert-manager/remote-state.tf +++ b/modules/eks/cert-manager/remote-state.tf @@ -1,6 +1,6 @@ module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" + version = "1.5.0" component = var.eks_component_name @@ -9,7 +9,7 @@ module "eks" { module "dns_gbl_delegated" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" + version = "1.5.0" component = "dns-delegated" environment = "gbl" diff --git a/modules/eks/cert-manager/resources/cert-manager-values.yaml b/modules/eks/cert-manager/resources/cert-manager-values.yaml index d8fa6dd79..4e24e3d40 100644 --- a/modules/eks/cert-manager/resources/cert-manager-values.yaml +++ b/modules/eks/cert-manager/resources/cert-manager-values.yaml @@ -4,9 +4,8 @@ installCRDs: true serviceAccount: create: true securityContext: - enabled: true fsGroup: 1001 - runAsGroup: 1001 + runAsUser: 1001 prometheus: servicemonitor: prometheusInstance: default diff --git a/modules/eks/cert-manager/variables.tf b/modules/eks/cert-manager/variables.tf index c55b1f972..175f4c7b6 100644 --- a/modules/eks/cert-manager/variables.tf +++ b/modules/eks/cert-manager/variables.tf @@ -163,4 +163,3 @@ variable "eks_component_name" { description = "The name of the eks component" default = "eks/cluster" } - diff --git a/modules/eks/cert-manager/versions.tf b/modules/eks/cert-manager/versions.tf index 45b29866a..48fd8c954 100644 --- a/modules/eks/cert-manager/versions.tf +++ b/modules/eks/cert-manager/versions.tf @@ -12,7 +12,7 @@ terraform { } kubernetes = { source = "hashicorp/kubernetes" - version = ">= 2.14.0" + version = ">= 2.14.0, != 2.21.0" } } } diff --git a/modules/eks/cluster/CHANGELOG.md b/modules/eks/cluster/CHANGELOG.md new file mode 100644 index 000000000..fc7e302c0 --- /dev/null +++ b/modules/eks/cluster/CHANGELOG.md @@ -0,0 +1,548 @@ +## Release 1.468.0 + +PR [#1072](https://github.com/cloudposse/terraform-aws-components/pull/1072) + +Bugfix: + +- Correctly map AWS SSO 
Permission Sets referenced by `aws_sso_permission_sets_rbac` to IAM Role ARNs. +- Broken in Release 1.431.1: Update to use AWS Auth API + +## Release 1.467.0 + +PR [#1071](https://github.com/cloudposse/terraform-aws-components/pull/1071) + +Bugfix: Update `cloudposse/eks-node-group/aws` to v3.0.1. + +- Fixes failure to create userdata for AL2 and Windows when using it to run `bootstrap.sh`. + +## Release 1.465.0 + +Components PR [#1069](https://github.com/cloudposse/terraform-aws-components/pull/1069) + +Update `cloudposse/eks-node-group/aws` to v3.0.0 + +- Enable use of Amazon Linux 2023 +- Other bug fixes and improvements +- See https://github.com/cloudposse/terraform-aws-eks-node-group/releases/tag/3.0.0 + +## Release 1.455.1 + +Components PR [#1057](https://github.com/cloudposse/terraform-aws-components/pull/1057) + +Fixed "Invalid count argument" argument when creating new cluster + +## Release 1.452.0 + +Components PR [#1046](https://github.com/cloudposse/terraform-aws-components/pull/1046) + +Added support for passing extra arguments to `kubelet` and other startup modifications supported by EKS on Amazon Linux +2 via the +[`bootstrap.sh`](https://github.com/awslabs/amazon-eks-ami/blob/d87c6c49638216907cbd6630b6cadfd4825aed20/templates/al2/runtime/bootstrap.sh) +script. + +This support should be considered an `alpha` version, as it may change when support for Amazon Linux 2023 is added, and +does not work with Bottlerocket. + +## Release 1.431.1: Breaking Changes + +Components PR [#1033](https://github.com/cloudposse/terraform-aws-components/pull/1033) + +### Major Breaking Changes + +> [!WARNING] +> +> #### Major Breaking Changes, Manual Intervention Required +> +> This release includes a major breaking change that requires manual intervention to migrate existing clusters. The +> change is necessary to support the new AWS Access Control API, which is more secure and more reliable than the old +> `aws-auth` ConfigMap. + +This release drops support for the `aws-auth` ConfigMap and switches to managing access control with the new AWS Access +Control API. This change allows for more secure and reliable access control, and removes the requirement that Terraform +operations on the EKS cluster itself require network access to the EKS control plane. + +In this release, this component only supports assigning "team roles" to Kubernetes RBAC groups. Support for AWS EKS +Access Policies is not yet implemented. However, if you specify `system:masters` as a group, that will be translated +into assigning the `AmazonEKSClusterAdminPolicy` to the role. Any other `system:*` group will cause an error. + +> [!TIP] +> +> #### Network Access Considerations +> +> Previously, this component required network access to the EKS control plane to manage the `aws-auth` ConfigMap. This +> meant having the EKS control plane accessible from the public internet, or using a bastion host or VPN to access the +> control plane. With the new AWS Access Control API, Terraform operations on the EKS cluster no longer require network +> access to the EKS control plane. +> +> This may seem like it makes it easier to secure the EKS control plane, but Terraform users will still require network +> access to the EKS control plane to manage any deployments or other Kubernetes resources in the cluster. This means +> that this upgrade does not substantially change the need for network access. 
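+
+For illustration, here is a minimal sketch of mapping team roles to Kubernetes RBAC groups under the new API, using the
+`aws_team_roles_rbac` input documented in this component's README (team role and group names are examples; adjust them
+to your own setup):
+
+```yaml
+components:
+  terraform:
+    eks/cluster:
+      vars:
+        aws_team_roles_rbac:
+          # `system:masters` is translated into the `AmazonEKSClusterAdminPolicy` access policy.
+          # Any other `system:*` group will cause an error.
+          - aws_team_role: admin
+            groups:
+              - system:masters
+          # Non-`system:*` groups (such as the `idp:*` groups created by `eks/idp-roles`) are plain RBAC groups.
+          - aws_team_role: observer
+            groups:
+              - idp:observer
+```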
+
+### Minor Changes
+
+With the fixes included and AWS Terraform Provider v5.43.0 and Karpenter v0.33.0, the
+`legacy_do_not_create_karpenter_instance_profile` is now obsolete. After upgrading both this component and the
+`eks/karpenter` component, if you had it in your configuration, you can remove it. If you had previously set it to
+`false`, removing it may cause an error when you apply the changes. If you see an error about the
+`aws_iam_instance_profile` resource being destroyed (cannot be destroyed because it is in use, has dependencies, and/or
+has role attached), you can simply remove the resource from the Terraform state with `[atmos] terraform state rm`,
+because it will be managed by the Karpenter controller instead of Terraform.
+
+### Access Control API Migration Procedure
+
+Full details of the migration process can be found in the `cloudposse/terraform-aws-eks-cluster`
+[migration document](https://github.com/cloudposse/terraform-aws-eks-cluster/blob/main/docs/migration-v3-v4.md). This
+section is a streamlined version for users of this `eks/cluster` component.
+
+> [!IMPORTANT]
+>
+> The commands below assume the component is named "eks/cluster". If you are using a different name, replace
+> "eks/cluster" with the correct component name.
+
+#### Prepare for Migration
+
+Make sure you have `kubectl` access to the cluster, preferably using the `aws eks get-token` command configured into
+your `$KUBECONFIG` file. Geodesic users can usually set this up with
+
+```shell
+atmos aws eks update-kubeconfig eks/cluster -s=<tenant>-<region>-<stage>
+# or
+set-cluster <tenant>-<region>-<stage>
+```
+
+Where `<tenant>` is the "tenant" name, a.k.a. the "org" name, e.g. "core", and should be omitted (along with the hyphen)
+if your organization does not use a tenant name. `<region>` is the AWS region abbreviation your organization is using,
+e.g. "usw2" or "uw2", and `<stage>` is the "stage" or "account" name, e.g. "auto" or "prod".
+
+Test your access with `kubectl`
+
+```shell
+# check if you have any access at all. Should output "yes".
+kubectl auth can-i -A create selfsubjectaccessreviews.authorization.k8s.io
+
+# Do you have full cluster administrator access?
+kubectl auth can-i '*' '*'
+
+# Show me what I can and cannot do (if `rakkess` is installed)
+rakkess
+
+```
+
+#### Migrate
+
+1. Update the component (already done if you see this document).
+2. Run `atmos terraform plan eks/cluster -s <stack>`
+
+See this error:
+
+```plaintext
+To work with module.eks_cluster.kubernetes_config_map.aws_auth[0] (orphan) its original provider configuration
+```
+
+Note, in other documentation, the exact "address" of the orphaned resource may be different, and the documentation may
+say to refer to the address of the resource in the error message. In this case, because we are using this component as
+the root module, the address should be exactly as shown above. (Possibly ending with `aws_auth_ignore_changes[0]`
+instead of `aws_auth[0]`.)
+
+3. Remove the orphaned resource from the state file with
+
+```
+atmos terraform state rm eks/cluster 'module.eks_cluster.kubernetes_config_map.aws_auth[0]' -s <stack>
+```
+
+4. `atmos terraform plan eks/cluster -s <stack>`
+
+Verify:
+
+- `module.eks_cluster.aws_eks_cluster.default[0]` will be updated in-place
+  - access_config.authentication_mode = "CONFIG_MAP" -> "API_AND_CONFIG_MAP"
+
+Stop and ask for help if you see `module.eks_cluster.aws_eks_cluster.default[0]` will be destroyed. Expect to see a lot
+of IAM changes due to the potential for the EKS OIDC thumbprint to change, and a lot of `aws_eks_access_entry`
+additions. You may also see:
+
+- `aws_security_group_rule` resources replaced by `aws_vpc_security_group_ingress_rule` resources
+- `null_resource` resources destroyed
+
+5. Apply the plan with `atmos terraform apply eks/cluster -s <stack> --from-plan`
+
+**EXPECT AN ERROR**. Something like:
+
+```plaintext
+β”‚ Error: creating EKS Access Entry
+(eg-core-usw2-auto-eks-cluster:arn:aws:iam::123456789012:role/eg-core-gbl-auto-terraform): operation error EKS: CreateAccessEntry, https response error StatusCode: 409, RequestID: 97a40994-4223-4af1-977e-42ec57eb3ad6, ResourceInUseException: The specified access entry resource is already in use on this cluster.
+β”‚
+β”‚   with module.eks_cluster.aws_eks_access_entry.map["arn:aws:iam::123456789012:role/eg-core-gbl-auto-terraform"],
+β”‚   on .terraform/modules/eks_cluster/auth.tf line 60, in resource "aws_eks_access_entry" "map":
+β”‚   60: resource "aws_eks_access_entry" "map" {
+```
+
+This is expected. The access entry is something we want to control, but a duplicate is automatically created by AWS
+during the conversion. Import the created entry. You may get other errors, but they are likely transient and will be
+fixed automatically after fixing this one.
+
+The `access entry ID` to import is given in the error message in parentheses. In the example above, the ID is
+`eg-core-usw2-auto-eks-cluster:arn:aws:iam::123456789012:role/eg-core-gbl-auto-terraform`.
+
+The Terraform `resource address` for the resource will also be in the error message: it is the part after "with". In the
+example above, the address is
+
+```plaintext
+module.eks_cluster.aws_eks_access_entry.map["arn:aws:iam::123456789012:role/eg-core-gbl-auto-terraform"]
+```
+
+Import the resource with
+
+```bash
+atmos terraform import eks/cluster '<resource address>' '<access entry ID>' -s <stack>
+```
+
+It is critical to use single quotes around the resource address and access entry ID to prevent the shell from
+interpreting the square brackets and colons and to preserve the double quotes in the resource address.
+
+After successfully importing the resource, run
+
+```
+atmos terraform apply eks/cluster -s <stack>
+```
+
+to apply tags to the entry and finish up any changes interrupted by the error. It should apply cleanly this time.
+
+#### Verify
+
+Verify that you still have access to the cluster with `kubectl`, just as you did in the "Prepare" section.
+
+#### Cleanup
+
+Either one cluster at a time, or later in an organization-wide cleanup, migrate all clusters from `API_AND_CONFIG_MAP`
+to `API` authentication mode.
+
+At this point you have both the old and new access control methods enabled, but nothing is managing the `aws-auth`
+ConfigMap. The `aws-auth` ConfigMap has been abandoned by this module and will no longer have entries added or,
+crucially, removed. In order to remove this lingering unmanaged grant of access, migrate the cluster to `API`
+authentication mode, and manually remove the `aws-auth` ConfigMap.
+
+- Update the `access_config.authentication_mode` to "API" in your configuration:
+
+  ```yaml
+  access_config:
+    authentication_mode: API
+  ```
+
+  and run `atmos terraform apply` again. This will cause EKS to ignore the `aws-auth` ConfigMap, but will not remove it.
+  Again, this will cause a lot of IAM changes due to the potential for the EKS OIDC thumbprint to change, but this is
+  not a problem.
+
+- Manually remove the `aws-auth` ConfigMap. You can do this with
+  `kubectl delete configmap aws-auth --namespace kube-system`.
This will not affect the cluster, because it is now being + managed by the new access control API, but it will reduce the possibility of confusion in the future. + +### End of Access Control API Migration + +--- + +## Changes in `v1.349.0` + +Components PR [#910](https://github.com/cloudposse/terraform-aws-components/pull/910) + +Bug fix and updates to Changelog, no action required. + +Fixed: Error about managed node group ARNs list being null, which could happen when adding a managed node group to an +existing cluster that never had one. + +## Changes in `v1.303.0` + +Components PR [#852](https://github.com/cloudposse/terraform-aws-components/pull/852) + +This is a bug fix and feature enhancement update. No action is necessary to upgrade. However, with the new features and +new recommendations, you may want to change your configuration. + +## Recommended (optional) changes + +Previously, we recommended deploying Karpenter to Fargate and not provisioning any nodes. However, this causes issues +with add-ons that require compute power to fully initialize, such as `coredns`, and it can reduce the cluster to a +single node, removing the high availability that comes from having a node per Availability Zone and replicas of pods +spread across those nodes. + +As a result, we now recommend deploying a minimal node group with a single instance (currently recommended to be a +`c7a.medium`) in each of 3 Availability Zones. This will provide the compute power needed to initialize add-ons, and +will provide high availability for the cluster. As a bonus, it will also remove the need to deploy Karpenter to Fargate. + +**NOTE about instance type**: The `c7a.medium` instance type is relatively new. If you have deployed an old version of +our [ServiceControlPolicy](https://github.com/cloudposse/terraform-aws-service-control-policies) +`DenyEC2NonNitroInstances`, `DenyNonNitroInstances` (obsolete, replaced by `DenyEC2NonNitroInstances`), and/or +`DenyEC2InstancesWithoutEncryptionInTransit`, you will want to update them to v0.14.1 or choose a different instance +type. + +### Migration procedure + +To perform the recommended migration, follow these steps: + +#### 1. Deploy a minimal node group, move addons to it + +Change your `eks/cluster` configuration to set `deploy_addons_to_fargate: false`. + +Add the following to your `eks/cluster` configuration, but copy the block device name, volume size, and volume type from +your existing Karpenter provisioner configuration. Also select the correct `ami_type` according to the `ami_family` in +your Karpenter provisioner configuration. + +```yaml +node_groups: + # will create 1 node group for each item in map + # Provision a minimal static node group for add-ons and redundant replicas + main: + # EKS AMI version to use, e.g. "1.16.13-20200821" (no "v"). + ami_release_version: null + # Type of Amazon Machine Image (AMI) associated with the EKS Node Group + # Typically AL2_x86_64 or BOTTLEROCKET_x86_64 + ami_type: BOTTLEROCKET_x86_64 + # Additional name attributes (e.g. 
`1`) for the node group + attributes: [] + # will create 1 auto scaling group in each specified availability zone + # or all AZs with subnets if none are specified anywhere + availability_zones: null + # Whether to enable Node Group to scale its AutoScaling Group + cluster_autoscaler_enabled: false + # True (recommended) to create new node_groups before deleting old ones, avoiding a temporary outage + create_before_destroy: true + # Configure storage for the root block device for instances in the Auto Scaling Group + # For Bottlerocket, use /dev/xvdb. For all others, use /dev/xvda. + block_device_map: + "/dev/xvdb": + ebs: + volume_size: 125 # in GiB + volume_type: gp3 + encrypted: true + delete_on_termination: true + # Set of instance types associated with the EKS Node Group. Terraform will only perform drift detection if a configuration value is provided. + instance_types: + - c6a.large + # Desired number of worker nodes when initially provisioned + desired_group_size: 3 + max_group_size: 3 + min_group_size: 3 + resources_to_tag: + - instance + - volume + tags: null +``` + +You do not need to apply the above changes yet, although you can if you want to. To reduce overhead, you can apply the +changes in the next step. + +#### 2. Move Karpenter to the node group, remove legacy support + +Delete the `fargate_profiles` section from your `eks/cluster` configuration, or at least remove the `karpenter` profile +from it. Disable legacy support by adding: + +```yaml +legacy_fargate_1_role_per_profile_enabled: false +``` + +#### 2.a Optional: Move Karpenter instance profile to `eks/cluster` component + +If you have the patience to manually import and remove a Terraform resource, you should move the Karpenter instance +profile to the `eks/cluster` component. This fixes an issue where the Karpenter instance profile could be broken by +certain sequences of Terraform operations. However, if you have multiple clusters to migrate, this can be tedious, and +the issue is not a serious one, so you may want to skip this step. + +To do this, add the following to your `eks/cluster` configuration: + +```yaml +legacy_do_not_create_karpenter_instance_profile: false +``` + +**BEFORE APPLYING CHANGES**: Run `atmos terraform plan` (with the appropriate arguments) to see the changes that will be +made. Among the resources to be created will be `aws_iam_instance_profile.default[0]`. Using the same arguments as +before, run `atmos`, but replace `plan` with `import 'aws_iam_instance_profile.default[0]' `, where +`` is the name of the profile the plan indicated it would create. It will be something like +`-karpenter`. + +**NOTE**: If you perform this step, you must also perform 3.a below. + +#### 2.b Apply the changes + +Apply the changes with `atmos terraform apply`. + +#### 3. Upgrade Karpenter + +Upgrade the `eks/karpenter` component to the latest version. Follow the upgrade instructions to enable the new +`karpenter-crd` chart by setting `crd_chart_enabled: true`. + +Upgrade to at least Karpenter v0.30.0, which is the first version to support factoring in the existing node group when +determining the number of nodes to provision. This will prevent Karpenter from provisioning nodes when they are not +needed because the existing node group already has enough capacity. Be careful about upgrading to v0.32.0 or later, as +that version introduces significant breaking changes. We recommend updating to v0.31.2 or later versions of v0.31.x, but +not v0.32.0 or later, as a first step. 
This provides a safe (revertible) upgrade path to v0.32.0 or later.
+
+#### 3.a Finish Move of Karpenter instance profile to `eks/cluster` component
+
+If you performed step 2.a above, you must also perform this step. If you did not perform step 2.a, you must NOT perform
+this step.
+
+In the `eks/karpenter` stack, set `legacy_create_karpenter_instance_profile: false`.
+
+**BEFORE APPLYING CHANGES**: Remove the Karpenter instance profile from the Terraform state, since it is now managed by
+the `eks/cluster` component, or else Terraform will delete it.
+
+```shell
+atmos terraform state eks/karpenter rm 'aws_iam_instance_profile.default[0]' -s=<stack>
+```
+
+#### 3.b Apply the changes
+
+Apply the changes with `atmos terraform apply`.
+
+## Changes included in `v1.303.0`
+
+This is a bug fix and feature enhancement update. No action is necessary to upgrade.
+
+### Bug Fixes
+
+- Timeouts for Add-Ons are now honored (they were being ignored)
+- If you supply a service account role ARN for an Add-On, it will be used, and no new role will be created. Previously
+  it was used, but the component created a new role anyway.
+- The EKS EFS controller add-on cannot be deployed to Fargate, and enabling it along with `deploy_addons_to_fargate`
+  will no longer attempt to deploy EFS to Fargate. Note that this means to use the EFS Add-On, you must create a managed
+  node group. Track the status of this feature with
+  [this issue](https://github.com/kubernetes-sigs/aws-efs-csi-driver/issues/1100).
+- If you are using an old VPC component that does not supply `az_private_subnets_map`, this module will now use the
+  older `private_subnet_ids` output.
+
+### Add-Ons have `enabled` option
+
+The EKS Add-Ons now have an optional "enabled" flag (defaults to `true`) so that you can selectively disable them in a
+stack where the inherited configuration has them enabled.
+
+## Upgrading to `v1.270.0`
+
+Components PR [#795](https://github.com/cloudposse/terraform-aws-components/pull/795)
+
+### Removed `identity` roles from cluster RBAC (`aws-auth` ConfigMap)
+
+Previously, this module added `identity` roles configured by the `aws_teams_rbac` input to the `aws-auth` ConfigMap.
+This never worked, and so now `aws_teams_rbac` is ignored. When upgrading, you may see these roles being removed from
+the `aws-auth`: this is expected and harmless.
+
+### Better support for Managed Node Group Block Device Specifications
+
+Previously, this module only supported specifying the disk size and encryption state for the root volume of Managed Node
+Groups. Now, the full set of block device specifications is supported, including the ability to specify the device name.
+This is particularly important when using BottleRocket, which uses a very small root volume for storing the OS and
+configuration, and exposes a second volume (`/dev/xvdb`) for storing data.
+
+#### Block Device Migration
+
+Almost all of the attributes of `node_groups` and `node_group_defaults` are now optional. This means you can remove from
+your configuration any attributes that previously you were setting to `null`.
+
+The `disk_size` and `disk_encryption_enabled` attributes are deprecated. They only apply to `/dev/xvda`, and only
+provision a `gp2` volume. In order to provide backwards compatibility, they are still supported, and, when specified,
+cause the new `block_device_map` attribute to be ignored.
+
+The new `block_device_map` attribute is a map of objects.
The keys are the names of block devices, and the values are +objects with the attributes from the Terraform +[launch_template.block-devices](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template#block-devices) +resource. + +Note that the new default, when none of `block_device_map`, `disk_size`, or `disk_encryption_enabled` are specified, is +to provision a 20GB `gp3` volume for `/dev/xvda`, with encryption enabled. This is a change from the previous default, +which provisioned a `gp2` volume instead. + +### Support for EFS add-on + +This module now supports the EFS CSI driver add-on, in very much the same way as it supports the EBS CSI driver add-on. +The only difference is that the EFS CSI driver add-on requires that you first provision an EFS file system. + +#### Migration from `eks/efs-controller` to EFS CSI Driver Add-On + +If you are currently using the `eks/efs-controller` module, you can migrate to the EFS CSI Driver Add-On by following +these steps: + +1. Remove or scale to zero Pods any Deployments using the EFS file system. +2. Remove (`terraform destroy`) the `eks/efs-controller` module from your cluster. This will also remove the `efs-sc` + StorageClass. +3. Use the + [eks/storage-class](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/eks/storage-class) + module to create a replacement EFS StorageClass `efs-sc`. This component is new and you may need to add it to your + cluster. +4. Deploy the EFS CSI Driver Add-On by adding `aws-efs-csi-driver` to the `addons` map (see `README`). +5. Restore the Deployments you modified in step 1. + +### More options for specifying Availability Zones + +Previously, this module required you to specify the Availability Zones for the cluster in one of two ways: + +1. Explicitly, by providing the full AZ names via the `availability_zones` input +2. Implicitly, via private subnets in the VPC + +Option 2 is still usually the best way, but now you have additional options: + +- You can specify the Availability Zones via the `availability_zones` input without specifying the full AZ names. You + can just specify the suffixes of the AZ names, and the module will find the full names for you, using the current + region. This is useful for using the same configuration in multiple regions. +- You can specify Availability Zone IDs via the `availability_zone_ids` input. This is useful to ensure that clusters in + different accounts are nevertheless deployed to the same Availability Zones. As with the `availability_zones` input, + you can specify the suffixes of the AZ IDs, and the module will find the full IDs for you, using the current region. + +### Support for Karpenter Instance Profile + +Previously, this module created an IAM Role for instances launched by Karpenter, but did not create the corresponding +Instance Profile, which was instead created by the `eks/karpenter` component. This can cause problems if you delete and +recreate the cluster, so for new clusters, this module can now create the Instance Profile as well. + +Because this is disruptive to existing clusters, this is not enabled by default. To enable it, set the +`legacy_do_not_create_karpenter_instance_profile` input to `false`, and also set the `eks/karpenter` input +`legacy_create_karpenter_instance_profile` to `false`. + +## Upgrading to `v1.250.0` + +Components PR [#723](https://github.com/cloudposse/terraform-aws-components/pull/723) + +### Improved support for EKS Add-Ons + +This has improved support for EKS Add-Ons. 
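+
+As a rough sketch of what the subsections below describe, an add-on configuration can now look like the following
+(attribute names as used elsewhere in this component's documentation; versions and timeouts are illustrative):
+
+```yaml
+deploy_addons_to_fargate: true
+addons:
+  coredns:
+    addon_version: null # null means use the latest compatible version
+    # Required (as of this writing) when deploying CoreDNS to Fargate
+    configuration_values: '{"computeType": "Fargate"}'
+    create_timeout: "10m"
+  aws-ebs-csi-driver:
+    addon_version: null
+    # The Service Account IAM Role is created and attached automatically
+```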
+
+##### Configuration and Timeouts
+
+The `addons` input now accepts a `configuration_values` input to allow you to configure the add-ons, and various timeout
+inputs to allow you to fine-tune the timeouts for the add-ons.
+
+##### Automatic IAM Role Creation
+
+If you enable `aws-ebs-csi-driver` or `vpc-cni` add-ons, the module will automatically create the required Service
+Account IAM Role and attach it to the add-on.
+
+##### Add-Ons can be deployed to Fargate
+
+If you are using Karpenter and not provisioning any nodes with this module, the `coredns` and `aws-ebs-csi-driver`
+add-ons can be deployed to Fargate. (They must be able to run somewhere in the cluster or else the deployment will
+fail.)
+
+To cause the add-ons to be deployed to Fargate, set the `deploy_addons_to_fargate` input to `true`.
+
+**Note about CoreDNS**: If you want to deploy CoreDNS to Fargate, as of this writing you must set the
+`configuration_values` input for CoreDNS to `'{"computeType": "Fargate"}'`. If you want to deploy CoreDNS to EC2
+instances, you must NOT include the `computeType` configuration value.
+
+### Availability Zones implied by Private Subnets
+
+You can now avoid specifying Availability Zones for the cluster anywhere. If all of the possible Availability Zones
+inputs are empty, the module will use the Availability Zones implied by the private subnets. That is, it will deploy the
+cluster to all of the Availability Zones in which the VPC has private subnets.
+
+### Optional support for 1 Fargate Pod Execution Role per Cluster
+
+Previously, this module created a separate Fargate Pod Execution Role for each Fargate Profile it created. This is
+unnecessary, excessive, and can cause problems due to name collisions, but is otherwise merely inefficient, so it is not
+important to fix this on existing, working clusters. This update brings a feature that causes the module to create at
+most 1 Fargate Pod Execution Role per cluster.
+
+**This change is recommended for all NEW clusters, but only NEW clusters**. Because it is a breaking change, it is not
+enabled by default. To enable it, set the `legacy_fargate_1_role_per_profile_enabled` variable to `false`.
+
+**WARNING**: If you enable this feature on an existing cluster, and that cluster is using Karpenter, the update could
+destroy all of your existing Karpenter-provisioned nodes. Depending on your Karpenter version, this could leave you with
+stranded EC2 instances (still running, but not managed by Karpenter or visible to the cluster) and an interruption of
+service, and possibly other problems. If you are using Karpenter and want to enable this feature, the safest way is to
+destroy the existing cluster and create a new one with this feature enabled.
diff --git a/modules/eks/cluster/README.md b/modules/eks/cluster/README.md
index 43cf6b9f2..b4c723e2d 100644
--- a/modules/eks/cluster/README.md
+++ b/modules/eks/cluster/README.md
@@ -1,7 +1,23 @@
-# Component: `eks`
+---
+tags:
+  - component/eks/cluster
+  - layer/eks
+  - provider/aws
+---

-This component is responsible for provisioning an end-to-end EKS Cluster, including managed node groups.
-NOTE: This component can only be deployed after logging in to AWS via Federated login with SAML (e.g. GSuite) or assuming an IAM role (e.g. from a CI/CD system). It cannot be deployed if you login to AWS via AWS SSO, the reason being is that on initial deployment, the EKS cluster will be owned by the assumed role that provisioned it.
If this were to be the AWS SSO Role, then we risk losing access to the EKS cluster once the ARN of the AWS SSO Role eventually changes. +# Component: `eks/cluster` + +This component is responsible for provisioning an end-to-end EKS Cluster, including managed node groups and Fargate +profiles. + +> [!NOTE] +> +> #### Windows not supported +> +> This component has not been tested with Windows worker nodes of any launch type. Although upstream modules support +> Windows nodes, there are likely issues around incorrect or insufficient IAM permissions or other configuration that +> would need to be resolved for this component to properly configure the upstream modules for Windows nodes. If you need +> Windows nodes, please experiment and be on the lookout for issues, and then report any issues to Cloud Posse. ## Usage @@ -9,27 +25,159 @@ NOTE: This component can only be deployed after logging in to AWS via Federated Here's an example snippet for how to use this component. +This example expects the [Cloud Posse Reference Architecture](https://docs.cloudposse.com/) Identity and Network designs +deployed for mapping users to EKS service roles and granting access in a private network. In addition, this example has +the GitHub OIDC integration added and makes use of Karpenter to dynamically scale cluster nodes. + +For more on these requirements, see [Identity Reference Architecture](https://docs.cloudposse.com/layers/identity/), +[Network Reference Architecture](https://docs.cloudposse.com/layers/network/), the +[GitHub OIDC component](https://docs.cloudposse.com/components/library/aws/github-oidc-provider/), and the +[Karpenter component](https://docs.cloudposse.com/components/library/aws/eks/karpenter/). + +### Mixin pattern for Kubernetes version + +We recommend separating out the Kubernetes and related addons versions into a separate mixin (one per Kubernetes minor +version), to make it easier to run different versions in different environments, for example while testing a new +version. + +We also recommend leaving "resolve conflicts" settings unset and therefore using the default "OVERWRITE" setting because +any custom configuration that you would want to preserve should be managed by Terraform configuring the add-ons +directly. + +For example, create `catalog/eks/cluster/mixins/k8s-1-29.yaml` with the following content: + +```yaml +components: + terraform: + eks/cluster: + vars: + cluster_kubernetes_version: "1.29" + + # You can set all the add-on versions to `null` to use the latest version, + # but that introduces drift as new versions are released. As usual, we recommend + # pinning the versions to a specific version and upgrading when convenient. 
+ + # Determine the latest version of the EKS add-ons for the specified Kubernetes version + # EKS_K8S_VERSION=1.29 # replace with your cluster version + # ADD_ON=vpc-cni # replace with the add-on name + # echo "${ADD_ON}:" && aws eks describe-addon-versions --kubernetes-version $EKS_K8S_VERSION --addon-name $ADD_ON \ + # --query 'addons[].addonVersions[].{Version: addonVersion, Defaultversion: compatibilities[0].defaultVersion}' --output table + + # To see versions for all the add-ons, wrap the above command in a for loop: + # for ADD_ON in vpc-cni kube-proxy coredns aws-ebs-csi-driver aws-efs-csi-driver; do + # echo "${ADD_ON}:" && aws eks describe-addon-versions --kubernetes-version $EKS_K8S_VERSION --addon-name $ADD_ON \ + # --query 'addons[].addonVersions[].{Version: addonVersion, Defaultversion: compatibilities[0].defaultVersion}' --output table + # done + + # To see the custom configuration schema for an add-on, run the following command: + # aws eks describe-addon-configuration --addon-name aws-ebs-csi-driver \ + # --addon-version v1.20.0-eksbuild.1 | jq '.configurationSchema | fromjson' + # See the `coredns` configuration below for an example of how to set a custom configuration. + + # https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html + # https://docs.aws.amazon.com/eks/latest/userguide/managing-add-ons.html#creating-an-add-on + addons: + # https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html + # https://docs.aws.amazon.com/eks/latest/userguide/managing-vpc-cni.html + # https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-role + # https://aws.github.io/aws-eks-best-practices/networking/vpc-cni/#deploy-vpc-cni-managed-add-on + vpc-cni: + addon_version: "v1.16.0-eksbuild.1" # set `addon_version` to `null` to use the latest version + # https://docs.aws.amazon.com/eks/latest/userguide/managing-kube-proxy.html + kube-proxy: + addon_version: "v1.29.0-eksbuild.1" # set `addon_version` to `null` to use the latest version + # https://docs.aws.amazon.com/eks/latest/userguide/managing-coredns.html + coredns: + addon_version: "v1.11.1-eksbuild.4" # set `addon_version` to `null` to use the latest version + ## override default replica count of 2. In very large clusters, you may want to increase this. 
+ configuration_values: '{"replicaCount": 3}' + + # https://docs.aws.amazon.com/eks/latest/userguide/csi-iam-role.html + # https://aws.amazon.com/blogs/containers/amazon-ebs-csi-driver-is-now-generally-available-in-amazon-eks-add-ons + # https://docs.aws.amazon.com/eks/latest/userguide/managing-ebs-csi.html#csi-iam-role + # https://github.com/kubernetes-sigs/aws-ebs-csi-driver + aws-ebs-csi-driver: + addon_version: "v1.27.0-eksbuild.1" # set `addon_version` to `null` to use the latest version + # If you are not using [volume snapshots](https://kubernetes.io/blog/2020/12/10/kubernetes-1.20-volume-snapshot-moves-to-ga/#how-to-use-volume-snapshots) + # (and you probably are not), disable the EBS Snapshotter + # See https://github.com/aws/containers-roadmap/issues/1919 + configuration_values: '{"sidecars":{"snapshotter":{"forceEnable":false}}}' + + aws-efs-csi-driver: + addon_version: "v1.7.7-eksbuild.1" # set `addon_version` to `null` to use the latest version + # Set a short timeout in case of conflict with an existing efs-controller deployment + create_timeout: "7m" +``` + +### Common settings for all Kubernetes versions + +In your main stack configuration, you can then set the Kubernetes version by importing the appropriate mixin: + ```yaml +# +import: + - catalog/eks/cluster/mixins/k8s-1-29 + components: terraform: - eks: + eks/cluster: vars: enabled: true - cluster_kubernetes_version: "1.21" - availability_zones: ["us-west-2a", "us-west-2b", "us-west-2c"] + name: eks + vpc_component_name: "vpc" + eks_component_name: "eks/cluster" + + # Your choice of availability zones or availability zone ids + # availability_zones: ["us-east-1a", "us-east-1b", "us-east-1c"] + aws_ssm_agent_enabled: true + allow_ingress_from_vpc_accounts: + - tenant: core + stage: auto + - tenant: core + stage: corp + - tenant: core + stage: network + + public_access_cidrs: [] + allowed_cidr_blocks: [] + allowed_security_groups: [] + + enabled_cluster_log_types: + # Caution: enabling `api` log events may lead to a substantial increase in Cloudwatch Logs expenses. + - api + - audit + - authenticator + - controllerManager + - scheduler + oidc_provider_enabled: true - public_access_cidrs: ["72.107.0.0/24"] + + # Allows GitHub OIDC role + github_actions_iam_role_enabled: true + github_actions_iam_role_attributes: ["eks"] + github_actions_allowed_repos: + - acme/infra + + # We recommend, at a minimum, deploying 1 managed node group, + # with the same number of instances as availability zones (typically 3). managed_node_groups_enabled: true - node_groups: # null means use default set in defaults.auto.tf.vars + node_groups: # for most attributes, setting null here means use setting from node_group_defaults main: - # values of `null` will be replaced with default values - # availability_zones = null will create 1 auto scaling group in - # each availability zone in region_availability_zones + # availability_zones = null will create one autoscaling group + # in every private subnet in the VPC availability_zones: null - desired_group_size: 3 # number of instances to start with, must be >= number of AZs + # Tune the desired and minimum group size according to your baseload requirements. + # We recommend no autoscaling for the main node group, so it will + # stay at the specified desired group size, with additional + # capacity provided by Karpenter. 
Nevertheless, we recommend + # deploying enough capacity in the node group to handle your + # baseload requirements, and in production, we recommend you + # have a large enough node group to handle 3/2 (1.5) times your + # baseload requirements, to handle the loss of a single AZ. + desired_group_size: 3 # number of instances to start with, should be >= number of AZs min_group_size: 3 # must be >= number of AZs - max_group_size: 6 + max_group_size: 3 # Can only set one of ami_release_version or kubernetes_version # Leave both null to use latest AMI for Cluster Kubernetes version @@ -38,83 +186,381 @@ components: attributes: [] create_before_destroy: true - disk_size: 100 cluster_autoscaler_enabled: true instance_types: - - t3.medium + # Tune the instance type according to your baseload requirements. + - c7a.medium ami_type: AL2_x86_64 # use "AL2_x86_64" for standard instances, "AL2_x86_64_GPU" for GPU instances + node_userdata: + # WARNING: node_userdata is alpha status and will likely change in the future. + # Also, it is only supported for AL2 and some Windows AMIs, not BottleRocket or AL2023. + # Kubernetes docs: https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ + kubelet_extra_args: >- + --kube-reserved cpu=100m,memory=0.6Gi,ephemeral-storage=1Gi --system-reserved + cpu=100m,memory=0.2Gi,ephemeral-storage=1Gi --eviction-hard + memory.available<200Mi,nodefs.available<10%,imagefs.available<15% + block_device_map: + # EBS volume for local ephemeral storage + # IGNORED if legacy `disk_encryption_enabled` or `disk_size` are set! + # Use "/dev/xvda" for most of the instances (without local NVMe) + # using most of the Linuxes, "/dev/xvdb" for BottleRocket + "/dev/xvda": + ebs: + volume_size: 100 # number of GB + volume_type: gp3 + kubernetes_labels: {} kubernetes_taints: {} resources_to_tag: - instance - volume tags: null + + # The abbreviation method used for Availability Zones in your project. + # Used for naming resources in managed node groups. + # Either "short" or "fixed". + availability_zone_abbreviation_type: fixed + + cluster_private_subnets_only: true + cluster_encryption_config_enabled: true + cluster_endpoint_private_access: true + cluster_endpoint_public_access: false + cluster_log_retention_period: 90 + + # List of `aws-team-roles` (in the account where the EKS cluster is deployed) to map to Kubernetes RBAC groups + # You cannot set `system:*` groups here, except for `system:masters`. + # The `idp:*` roles referenced here are created by the `eks/idp-roles` component. + # While set here, the `idp:*` roles will have no effect until after + # the `eks/idp-roles` component is applied, which must be after the + # `eks/cluster` component is deployed. + aws_team_roles_rbac: + - aws_team_role: admin + groups: + - system:masters + - aws_team_role: poweruser + groups: + - idp:poweruser + - aws_team_role: observer + groups: + - idp:observer + - aws_team_role: planner + groups: + - idp:observer + - aws_team: terraform + groups: + - system:masters + + # Permission sets from AWS SSO allowing cluster access + # See `aws-sso` component. + aws_sso_permission_sets_rbac: + - aws_sso_permission_set: PowerUserAccess + groups: + - idp:poweruser + + # Set to false if you are not using Karpenter + karpenter_iam_role_enabled: true + + # All Fargate Profiles will use the same IAM Role when `legacy_fargate_1_role_per_profile_enabled` is set to false. + # Recommended for all new clusters, but will damage existing clusters provisioned with the legacy component. 
+ legacy_fargate_1_role_per_profile_enabled: false + # While it is possible to deploy add-ons to Fargate Profiles, it is not recommended. Use a managed node group instead. + deploy_addons_to_fargate: false +``` + +### Amazon EKS End-of-Life Dates + +When picking a Kubernetes version, be sure to review the +[end-of-life dates for Amazon EKS](https://endoflife.date/amazon-eks). Refer to the chart below: + +| cycle | release | latest | latest release | eol | extended support | +| :---- | :--------: | :---------- | :------------: | :--------: | :--------------: | +| 1.29 | 2024-01-23 | 1.29-eks-6 | 2024-04-18 | 2025-03-23 | 2026-03-23 | +| 1.28 | 2023-09-26 | 1.28-eks-12 | 2024-04-18 | 2024-11-26 | 2025-11-26 | +| 1.27 | 2023-05-24 | 1.27-eks-16 | 2024-04-18 | 2024-07-24 | 2025-07-24 | +| 1.26 | 2023-04-11 | 1.26-eks-17 | 2024-04-18 | 2024-06-11 | 2025-06-11 | +| 1.25 | 2023-02-21 | 1.25-eks-18 | 2024-04-18 | 2024-05-01 | 2025-05-01 | +| 1.24 | 2022-11-15 | 1.24-eks-21 | 2024-04-18 | 2024-01-31 | 2025-01-31 | +| 1.23 | 2022-08-11 | 1.23-eks-23 | 2024-04-18 | 2023-10-11 | 2024-10-11 | +| 1.22 | 2022-04-04 | 1.22-eks-14 | 2023-06-30 | 2023-06-04 | 2024-09-01 | +| 1.21 | 2021-07-19 | 1.21-eks-18 | 2023-06-09 | 2023-02-16 | 2024-07-15 | +| 1.20 | 2021-05-18 | 1.20-eks-14 | 2023-05-05 | 2022-11-01 | False | +| 1.19 | 2021-02-16 | 1.19-eks-11 | 2022-08-15 | 2022-08-01 | False | +| 1.18 | 2020-10-13 | 1.18-eks-13 | 2022-08-15 | 2022-08-15 | False | + +\* This Chart was generated 2024-05-12 with [the `eol` tool](https://github.com/hugovk/norwegianblue). Install it with +`python3 -m pip install --upgrade norwegianblue` and create a new table by running `eol --md amazon-eks` locally, or +view the information by visiting [the endoflife website](https://endoflife.date/amazon-eks). + +You can also view the release and support timeline for +[the Kubernetes project itself](https://endoflife.date/kubernetes). + +### Using Addons + +EKS clusters support β€œAddons” that can be automatically installed on a cluster. Install these addons with the +[`var.addons` input](https://docs.cloudposse.com/components/library/aws/eks/cluster/#input_addons). + +> [!TIP] +> +> Run the following command to see all available addons, their type, and their publisher. You can also see the URL for +> addons that are available through the AWS Marketplace. Replace 1.27 with the version of your cluster. See +> [Creating an addon](https://docs.aws.amazon.com/eks/latest/userguide/managing-add-ons.html#creating-an-add-on) for +> more details. + +```shell +EKS_K8S_VERSION=1.29 # replace with your cluster version +aws eks describe-addon-versions --kubernetes-version $EKS_K8S_VERSION \ + --query 'addons[].{MarketplaceProductUrl: marketplaceInformation.productUrl, Name: addonName, Owner: owner Publisher: publisher, Type: type}' --output table ``` +> [!TIP] +> +> You can see which versions are available for each addon by executing the following commands. Replace 1.29 with the +> version of your cluster. 
+ +```shell +EKS_K8S_VERSION=1.29 # replace with your cluster version +echo "vpc-cni:" && aws eks describe-addon-versions --kubernetes-version $EKS_K8S_VERSION --addon-name vpc-cni \ + --query 'addons[].addonVersions[].{Version: addonVersion, Defaultversion: compatibilities[0].defaultVersion}' --output table + +echo "kube-proxy:" && aws eks describe-addon-versions --kubernetes-version $EKS_K8S_VERSION --addon-name kube-proxy \ + --query 'addons[].addonVersions[].{Version: addonVersion, Defaultversion: compatibilities[0].defaultVersion}' --output table + +echo "coredns:" && aws eks describe-addon-versions --kubernetes-version $EKS_K8S_VERSION --addon-name coredns \ + --query 'addons[].addonVersions[].{Version: addonVersion, Defaultversion: compatibilities[0].defaultVersion}' --output table + +echo "aws-ebs-csi-driver:" && aws eks describe-addon-versions --kubernetes-version $EKS_K8S_VERSION --addon-name aws-ebs-csi-driver \ + --query 'addons[].addonVersions[].{Version: addonVersion, Defaultversion: compatibilities[0].defaultVersion}' --output table + +echo "aws-efs-csi-driver:" && aws eks describe-addon-versions --kubernetes-version $EKS_K8S_VERSION --addon-name aws-efs-csi-driver \ + --query 'addons[].addonVersions[].{Version: addonVersion, Defaultversion: compatibilities[0].defaultVersion}' --output table +``` + +Some add-ons accept additional configuration. For example, the `vpc-cni` addon accepts a `disableNetworking` parameter. +View the available configuration options (as JSON Schema) via the `aws eks describe-addon-configuration` command. For +example: + +```shell +aws eks describe-addon-configuration \ + --addon-name aws-ebs-csi-driver \ + --addon-version v1.20.0-eksbuild.1 | jq '.configurationSchema | fromjson' +``` + +You can then configure the add-on via the `configuration_values` input. 
For example: + +```yaml +aws-ebs-csi-driver: + configuration_values: '{"node": {"loggingFormat": "json"}}' +``` + +Configure the addons like the following example: + +```yaml +# https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html +# https://docs.aws.amazon.com/eks/latest/userguide/managing-add-ons.html#creating-an-add-on +# https://aws.amazon.com/blogs/containers/amazon-eks-add-ons-advanced-configuration/ +addons: + # https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html + # https://docs.aws.amazon.com/eks/latest/userguide/managing-vpc-cni.html + # https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-role + # https://aws.github.io/aws-eks-best-practices/networking/vpc-cni/#deploy-vpc-cni-managed-add-on + vpc-cni: + addon_version: "v1.12.2-eksbuild.1" # set `addon_version` to `null` to use the latest version + # https://docs.aws.amazon.com/eks/latest/userguide/managing-kube-proxy.html + kube-proxy: + addon_version: "v1.25.6-eksbuild.1" # set `addon_version` to `null` to use the latest version + # https://docs.aws.amazon.com/eks/latest/userguide/managing-coredns.html + coredns: + addon_version: "v1.9.3-eksbuild.2" # set `addon_version` to `null` to use the latest version + # Override default replica count of 2, to have one in each AZ + configuration_values: '{"replicaCount": 3}' + # https://docs.aws.amazon.com/eks/latest/userguide/csi-iam-role.html + # https://aws.amazon.com/blogs/containers/amazon-ebs-csi-driver-is-now-generally-available-in-amazon-eks-add-ons + # https://docs.aws.amazon.com/eks/latest/userguide/managing-ebs-csi.html#csi-iam-role + # https://github.com/kubernetes-sigs/aws-ebs-csi-driver + aws-ebs-csi-driver: + addon_version: "v1.19.0-eksbuild.2" # set `addon_version` to `null` to use the latest version + # If you are not using [volume snapshots](https://kubernetes.io/blog/2020/12/10/kubernetes-1.20-volume-snapshot-moves-to-ga/#how-to-use-volume-snapshots) + # (and you probably are not), disable the EBS Snapshotter with: + configuration_values: '{"sidecars":{"snapshotter":{"forceEnable":false}}}' +``` + +Some addons, such as CoreDNS, require at least one node to be fully provisioned first. See +[issue #170](https://github.com/cloudposse/terraform-aws-eks-cluster/issues/170) for more details. Set +`var.addons_depends_on` to `true` to require the Node Groups to be provisioned before addons. + +```yaml +addons_depends_on: true +addons: + coredns: + addon_version: "v1.8.7-eksbuild.1" +``` + +> [!WARNING] +> +> Addons may not be suitable for all use-cases! For example, if you are deploying Karpenter to Fargate and using +> Karpenter to provision all nodes, these nodes will never be available before the cluster component is deployed if you +> are using the CoreDNS addon (for example). +> +> This is one of the reasons we recommend deploying a managed node group: to ensure that the addons will become fully +> functional during deployment of the cluster. + +For more information on upgrading EKS Addons, see +["How to Upgrade EKS Cluster Addons"](https://docs.cloudposse.com/learn/maintenance/upgrades/how-to-upgrade-eks-cluster-addons/) + +### Adding and Configuring a new EKS Addon + +The component already supports all the EKS addons shown in the configurations above. To add a new EKS addon, not +supported by the cluster, add it to the `addons` map (`addons` variable): + +```yaml +addons: + my-addon: + addon_version: "..." 
+``` + +If the new addon requires an EKS IAM Role for Kubernetes Service Account, perform the following steps: + +- Add a file `addons-custom.tf` to the `eks/cluster` folder if not already present + +- In the file, add an IAM policy document with the permissions required for the addon, and use the `eks-iam-role` module + to provision an IAM Role for Kubernetes Service Account for the addon: + + ```hcl + data "aws_iam_policy_document" "my_addon" { + statement { + sid = "..." + effect = "Allow" + resources = ["..."] + + actions = [ + "...", + "..." + ] + } + } + + module "my_addon_eks_iam_role" { + source = "cloudposse/eks-iam-role/aws" + version = "2.1.0" + + eks_cluster_oidc_issuer_url = local.eks_cluster_oidc_issuer_url + + service_account_name = "..." + service_account_namespace = "..." + + aws_iam_policy_document = [one(data.aws_iam_policy_document.my_addon[*].json)] + + context = module.this.context + } + ``` + + For examples of how to configure the IAM role and IAM permissions for EKS addons, see [addons.tf](addons.tf). + +- Add a file `additional-addon-support_override.tf` to the `eks/cluster` folder if not already present + +- In the file, add the IAM Role for Kubernetes Service Account for the addon to the + `overridable_additional_addon_service_account_role_arn_map` map: + + ```hcl + locals { + overridable_additional_addon_service_account_role_arn_map = { + my-addon = module.my_addon_eks_iam_role.service_account_role_arn + } + } + ``` + +- This map will override the default map in the [additional-addon-support.tf](additional-addon-support.tf) file, and + will be merged into the final map together with the default EKS addons `vpc-cni` and `aws-ebs-csi-driver` (which this + component configures and creates IAM Roles for Kubernetes Service Accounts) + +- Follow the instructions in the [additional-addon-support.tf](additional-addon-support.tf) file if the addon may need + to be deployed to Fargate, or has dependencies that Terraform cannot detect automatically. 
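
  For example, if the addon should also run on Fargate (such as on a nodeless cluster), the same
  `additional-addon-support_override.tf` file can override the Fargate-related locals declared in
  [additional-addon-support.tf](additional-addon-support.tf). This is only a sketch, assuming a hypothetical
  `my_addon_fargate_profile` module created with `cloudposse/eks-fargate-profile/aws`:

  ```hcl
  locals {
    # Signal that at least one custom addon must be deployed to Fargate on nodeless clusters
    overridable_deploy_additional_addons_to_fargate = true

    # Map the addon name to the corresponding eks-fargate-profile module output that schedules its pods.
    # `module.my_addon_fargate_profile` is hypothetical, not part of this component.
    overridable_additional_addon_fargate_profiles = {
      my-addon = module.my_addon_fargate_profile
    }
  }
  ```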
+ + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [terraform](#requirement\_terraform) | >= 1.3.0 | | [aws](#requirement\_aws) | >= 4.9.0 | +| [random](#requirement\_random) | >= 3.0 | ## Providers | Name | Version | |------|---------| | [aws](#provider\_aws) | >= 4.9.0 | +| [random](#provider\_random) | >= 3.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [delegated\_roles](#module\_delegated\_roles) | cloudposse/stack-config/yaml//modules/remote-state | 1.3.1 | -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.3.1 | -| [eks\_cluster](#module\_eks\_cluster) | cloudposse/eks-cluster/aws | 2.5.0 | -| [fargate\_profile](#module\_fargate\_profile) | cloudposse/eks-fargate-profile/aws | 1.1.0 | +| [aws\_ebs\_csi\_driver\_eks\_iam\_role](#module\_aws\_ebs\_csi\_driver\_eks\_iam\_role) | cloudposse/eks-iam-role/aws | 2.1.1 | +| [aws\_ebs\_csi\_driver\_fargate\_profile](#module\_aws\_ebs\_csi\_driver\_fargate\_profile) | cloudposse/eks-fargate-profile/aws | 1.3.0 | +| [aws\_efs\_csi\_driver\_eks\_iam\_role](#module\_aws\_efs\_csi\_driver\_eks\_iam\_role) | cloudposse/eks-iam-role/aws | 2.1.1 | +| [coredns\_fargate\_profile](#module\_coredns\_fargate\_profile) | cloudposse/eks-fargate-profile/aws | 1.3.0 | +| [eks\_cluster](#module\_eks\_cluster) | cloudposse/eks-cluster/aws | 4.1.0 | +| [fargate\_pod\_execution\_role](#module\_fargate\_pod\_execution\_role) | cloudposse/eks-fargate-profile/aws | 1.3.0 | +| [fargate\_profile](#module\_fargate\_profile) | cloudposse/eks-fargate-profile/aws | 1.3.0 | +| [iam\_arns](#module\_iam\_arns) | ../../account-map/modules/roles-to-principals | n/a | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | | [karpenter\_label](#module\_karpenter\_label) | cloudposse/label/null | 0.25.0 | | [region\_node\_group](#module\_region\_node\_group) | ./modules/node_group_by_region | n/a | -| [team\_roles](#module\_team\_roles) | cloudposse/stack-config/yaml//modules/remote-state | 1.3.1 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.3.1 | -| [vpc\_ingress](#module\_vpc\_ingress) | cloudposse/stack-config/yaml//modules/remote-state | 1.3.1 | +| [utils](#module\_utils) | cloudposse/utils/aws | 1.3.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [vpc\_cni\_eks\_iam\_role](#module\_vpc\_cni\_eks\_iam\_role) | cloudposse/eks-iam-role/aws | 2.1.1 | +| [vpc\_ingress](#module\_vpc\_ingress) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources | Name | Type | |------|------| +| [aws_iam_instance_profile.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource | | [aws_iam_policy.ipv6_eks_cni_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_iam_role.karpenter](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | | [aws_iam_role_policy_attachment.amazon_ec2_container_registry_readonly](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_iam_role_policy_attachment.amazon_eks_worker_node_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | 
[aws_iam_role_policy_attachment.amazon_ssm_managed_instance_core](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.aws_ebs_csi_driver](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.aws_efs_csi_driver](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_iam_role_policy_attachment.ipv6_eks_cni_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.vpc_cni](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [random_pet.camel_case_warning](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/pet) | resource | +| [aws_availability_zones.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | | [aws_iam_policy_document.assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_policy_document.ipv6_eks_cni_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.vpc_cni_ipv6](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_roles.sso_roles](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_roles) | data source | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| +| [access\_config](#input\_access\_config) | Access configuration for the EKS cluster |
object({
authentication_mode = optional(string, "API")
bootstrap_cluster_creator_admin_permissions = optional(bool, false)
})
| `{}` | no | | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [addons](#input\_addons) | Manages [EKS addons](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_addon) resources |
map(object({
enabled = optional(bool, true)
addon_version = optional(string, null)
# configuration_values is a JSON string, such as '{"computeType": "Fargate"}'.
configuration_values = optional(string, null)
# Set default resolve_conflicts to OVERWRITE because it is required on initial installation of
# add-ons that have self-managed versions installed by default (e.g. vpc-cni, coredns), and
# because any custom configuration that you would want to preserve should be managed by Terraform.
resolve_conflicts_on_create = optional(string, "OVERWRITE")
resolve_conflicts_on_update = optional(string, "OVERWRITE")
service_account_role_arn = optional(string, null)
create_timeout = optional(string, null)
update_timeout = optional(string, null)
delete_timeout = optional(string, null)
}))
| `{}` | no | +| [addons\_depends\_on](#input\_addons\_depends\_on) | If set to `true` (recommended), all addons will depend on managed node groups provisioned by this component and therefore not be installed until nodes are provisioned.
See [issue #170](https://github.com/cloudposse/terraform-aws-eks-cluster/issues/170) for more details. | `bool` | `true` | no | | [allow\_ingress\_from\_vpc\_accounts](#input\_allow\_ingress\_from\_vpc\_accounts) | List of account contexts to pull VPC ingress CIDR and add to cluster security group.

e.g.

{
environment = "ue2",
stage = "auto",
tenant = "core"
} | `any` | `[]` | no | | [allowed\_cidr\_blocks](#input\_allowed\_cidr\_blocks) | List of CIDR blocks to be allowed to connect to the EKS cluster | `list(string)` | `[]` | no | | [allowed\_security\_groups](#input\_allowed\_security\_groups) | List of Security Group IDs to be allowed to connect to the EKS cluster | `list(string)` | `[]` | no | -| [apply\_config\_map\_aws\_auth](#input\_apply\_config\_map\_aws\_auth) | Whether to execute `kubectl apply` to apply the ConfigMap to allow worker nodes to join the EKS cluster | `bool` | `true` | no | +| [apply\_config\_map\_aws\_auth](#input\_apply\_config\_map\_aws\_auth) | (Obsolete) Whether to execute `kubectl apply` to apply the ConfigMap to allow worker nodes to join the EKS cluster.
This input is included to avoid breaking existing configurations that set it to `true`;
a value of `false` is no longer allowed.
This input is obsolete and will be removed in a future release. | `bool` | `true` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | | [availability\_zone\_abbreviation\_type](#input\_availability\_zone\_abbreviation\_type) | Type of Availability Zone abbreviation (either `fixed` or `short`) to use in names. See https://github.com/cloudposse/terraform-aws-utils for details. | `string` | `"fixed"` | no | -| [availability\_zones](#input\_availability\_zones) | AWS Availability Zones in which to deploy multi-AZ resources.
If not provided, resources will be provisioned in every private subnet in the VPC. | `list(string)` | `[]` | no | -| [aws\_auth\_yaml\_strip\_quotes](#input\_aws\_auth\_yaml\_strip\_quotes) | If true, remove double quotes from the generated aws-auth ConfigMap YAML to reduce spurious diffs in plans | `bool` | `true` | no | +| [availability\_zone\_ids](#input\_availability\_zone\_ids) | List of Availability Zones IDs where subnets will be created. Overrides `availability_zones`.
Can be the full name, e.g. `use1-az1`, or just the part after the AZ ID region code, e.g. `-az1`,
to allow reusable values across regions. Consider contention for resources and spot pricing in each AZ when selecting.
Useful in some regions when using only some AZs and you want to use the same ones across multiple accounts. | `list(string)` | `[]` | no | +| [availability\_zones](#input\_availability\_zones) | AWS Availability Zones in which to deploy multi-AZ resources.
Ignored if `availability_zone_ids` is set.
Can be the full name, e.g. `us-east-1a`, or just the part after the region, e.g. `a` to allow reusable values across regions.
If not provided, resources will be provisioned in every zone with a private subnet in the VPC. | `list(string)` | `[]` | no | | [aws\_ssm\_agent\_enabled](#input\_aws\_ssm\_agent\_enabled) | Set true to attach the required IAM policy for AWS SSM agent to each EC2 instance's IAM Role | `bool` | `false` | no | +| [aws\_sso\_permission\_sets\_rbac](#input\_aws\_sso\_permission\_sets\_rbac) | (Not Recommended): AWS SSO (IAM Identity Center) permission sets in the EKS deployment account to add to `aws-auth` ConfigMap.
Unfortunately, `aws-auth` ConfigMap does not support SSO permission sets, so we map the generated
IAM Role ARN corresponding to the permission set at the time Terraform runs. This is subject to change
when any changes are made to the AWS SSO configuration, invalidating the mapping, and requiring a
`terraform apply` in this project to update the `aws-auth` ConfigMap and restore access. |
list(object({
aws_sso_permission_set = string
groups = list(string)
}))
| `[]` | no | +| [aws\_team\_roles\_rbac](#input\_aws\_team\_roles\_rbac) | List of `aws-team-roles` (in the target AWS account) to map to Kubernetes RBAC groups. |
list(object({
aws_team_role = string
groups = list(string)
}))
| `[]` | no | | [cluster\_encryption\_config\_enabled](#input\_cluster\_encryption\_config\_enabled) | Set to `true` to enable Cluster Encryption Configuration | `bool` | `true` | no | | [cluster\_encryption\_config\_kms\_key\_deletion\_window\_in\_days](#input\_cluster\_encryption\_config\_kms\_key\_deletion\_window\_in\_days) | Cluster Encryption Config KMS Key Resource argument - key deletion windows in days post destruction | `number` | `10` | no | | [cluster\_encryption\_config\_kms\_key\_enable\_key\_rotation](#input\_cluster\_encryption\_config\_kms\_key\_enable\_key\_rotation) | Cluster Encryption Config KMS Key Resource argument - enable kms key rotation | `bool` | `true` | no | | [cluster\_encryption\_config\_kms\_key\_id](#input\_cluster\_encryption\_config\_kms\_key\_id) | KMS Key ID to use for cluster encryption config | `string` | `""` | no | | [cluster\_encryption\_config\_kms\_key\_policy](#input\_cluster\_encryption\_config\_kms\_key\_policy) | Cluster Encryption Config KMS Key Resource argument - key policy | `string` | `null` | no | -| [cluster\_encryption\_config\_resources](#input\_cluster\_encryption\_config\_resources) | Cluster Encryption Config Resources to encrypt, e.g. ['secrets'] | `list(any)` |
[
"secrets"
]
| no | +| [cluster\_encryption\_config\_resources](#input\_cluster\_encryption\_config\_resources) | Cluster Encryption Config Resources to encrypt, e.g. `["secrets"]` | `list(string)` |
[
"secrets"
]
| no | | [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled. Default to AWS EKS resource and it is `false` | `bool` | `false` | no | | [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled. Default to AWS EKS resource and it is `true` | `bool` | `true` | no | | [cluster\_kubernetes\_version](#input\_cluster\_kubernetes\_version) | Desired Kubernetes master version. If you do not specify a value, the latest available version is used | `string` | `null` | no | @@ -122,39 +568,33 @@ components: | [cluster\_private\_subnets\_only](#input\_cluster\_private\_subnets\_only) | Whether or not to enable private subnets or both public and private subnets | `bool` | `false` | no | | [color](#input\_color) | The cluster stage represented by a color; e.g. blue, green | `string` | `""` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | -| [delegated\_iam\_roles](#input\_delegated\_iam\_roles) | Delegated IAM roles to add to `aws-auth` ConfigMap |
list(object({
role = string
groups = list(string)
}))
| `[]` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [deploy\_addons\_to\_fargate](#input\_deploy\_addons\_to\_fargate) | Set to `true` (not recommended) to deploy addons to Fargate instead of initial node pool | `bool` | `false` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | -| [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [enabled\_cluster\_log\_types](#input\_enabled\_cluster\_log\_types) | A list of the desired control plane logging to enable. For more information, see https://docs.aws.amazon.com/en_us/eks/latest/userguide/control-plane-logs.html. Possible values [`api`, `audit`, `authenticator`, `controllerManager`, `scheduler`] | `list(string)` | `[]` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [fargate\_profile\_iam\_role\_kubernetes\_namespace\_delimiter](#input\_fargate\_profile\_iam\_role\_kubernetes\_namespace\_delimiter) | Delimiter for the Kubernetes namespace in the IAM Role name for Fargate Profiles | `string` | `"-"` | no | | [fargate\_profile\_iam\_role\_permissions\_boundary](#input\_fargate\_profile\_iam\_role\_permissions\_boundary) | If provided, all Fargate Profiles IAM roles will be created with this permissions boundary attached | `string` | `null` | no | | [fargate\_profiles](#input\_fargate\_profiles) | Fargate Profiles config |
map(object({
kubernetes_namespace = string
kubernetes_labels = map(string)
}))
| `{}` | no | -| [iam\_primary\_roles\_stage\_name](#input\_iam\_primary\_roles\_stage\_name) | The name of the stage where the IAM primary roles are provisioned | `string` | `"identity"` | no | -| [iam\_primary\_roles\_tenant\_name](#input\_iam\_primary\_roles\_tenant\_name) | The name of the tenant where the IAM primary roles are provisioned | `string` | `null` | no | -| [iam\_roles\_environment\_name](#input\_iam\_roles\_environment\_name) | The name of the environment where the IAM roles are provisioned | `string` | `"gbl"` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [karpenter\_iam\_role\_enabled](#input\_karpenter\_iam\_role\_enabled) | Flag to enable/disable creation of IAM role for EC2 Instance Profile that is attached to the nodes launched by Karpenter | `bool` | `false` | no | -| [kubeconfig\_file](#input\_kubeconfig\_file) | Name of `kubeconfig` file to use to configure Kubernetes provider | `string` | `""` | no | -| [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | Set true to configure Kubernetes provider with a `kubeconfig` file specified by `kubeconfig_file`.
Mainly for when the standard configuration produces a Terraform error. | `bool` | `false` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [legacy\_do\_not\_create\_karpenter\_instance\_profile](#input\_legacy\_do\_not\_create\_karpenter\_instance\_profile) | **Obsolete:** The issues this was meant to mitigate were fixed in AWS Terraform Provider v5.43.0
and Karpenter v0.33.0. This variable will be removed in a future release.
Remove this input from your configuration and leave it at default.
**Old description:** When `true` (the default), suppresses creation of the IAM Instance Profile
for nodes launched by Karpenter, to preserve the legacy behavior of
the `eks/karpenter` component creating it.
Set to `false` to enable creation of the IAM Instance Profile, which
ensures that both the role and the instance profile have the same lifecycle,
and avoids AWS Provider issue [#32671](https://github.com/hashicorp/terraform-provider-aws/issues/32671).
Use in conjunction with `eks/karpenter` component `legacy_create_karpenter_instance_profile`. | `bool` | `true` | no | +| [legacy\_fargate\_1\_role\_per\_profile\_enabled](#input\_legacy\_fargate\_1\_role\_per\_profile\_enabled) | Set to `false` for new clusters to create a single Fargate Pod Execution role for the cluster.
Set to `true` for existing clusters to preserve the old behavior of creating
a Fargate Pod Execution role for each Fargate Profile. | `bool` | `true` | no | | [managed\_node\_groups\_enabled](#input\_managed\_node\_groups\_enabled) | Set false to prevent the creation of EKS managed node groups. | `bool` | `true` | no | -| [map\_additional\_aws\_accounts](#input\_map\_additional\_aws\_accounts) | Additional AWS account numbers to add to `aws-auth` ConfigMap | `list(string)` | `[]` | no | -| [map\_additional\_iam\_roles](#input\_map\_additional\_iam\_roles) | Additional IAM roles to add to `config-map-aws-auth` ConfigMap |
list(object({
rolearn = string
username = string
groups = list(string)
}))
| `[]` | no | -| [map\_additional\_iam\_users](#input\_map\_additional\_iam\_users) | Additional IAM users to add to `aws-auth` ConfigMap |
list(object({
userarn = string
username = string
groups = list(string)
}))
| `[]` | no | -| [map\_additional\_worker\_roles](#input\_map\_additional\_worker\_roles) | AWS IAM Role ARNs of worker nodes to add to `aws-auth` ConfigMap | `list(string)` | `[]` | no | +| [map\_additional\_aws\_accounts](#input\_map\_additional\_aws\_accounts) | (Obsolete) Additional AWS accounts to grant access to the EKS cluster.
This input is included to avoid breaking existing configurations that
supplied an empty list, but the list is no longer allowed to have entries.
(It is not clear that it worked properly in earlier versions in any case.)
This component now only supports EKS access entries, which require full principal ARNs.
This input is deprecated and will be removed in a future release. | `list(string)` | `[]` | no | +| [map\_additional\_iam\_roles](#input\_map\_additional\_iam\_roles) | Additional IAM roles to grant access to the cluster.
*WARNING*: Full Role ARN, including path, is required for `rolearn`.
In earlier versions (with `aws-auth` ConfigMap), only the path
had to be removed from the Role ARN. The path is now required.
`username` is now ignored. This input is planned to be replaced
in a future release with a more flexible input structure that consolidates
`map_additional_iam_roles` and `map_additional_iam_users`. |
list(object({
rolearn = string
username = optional(string)
groups = list(string)
}))
| `[]` | no | +| [map\_additional\_iam\_users](#input\_map\_additional\_iam\_users) | Additional IAM users to grant access to the cluster.
`username` is now ignored. This input is planned to be replaced
in a future release with a more flexible input structure that consolidates
`map_additional_iam_roles` and `map_additional_iam_users`. |
list(object({
userarn = string
username = optional(string)
groups = list(string)
}))
| `[]` | no | +| [map\_additional\_worker\_roles](#input\_map\_additional\_worker\_roles) | (Deprecated) AWS IAM Role ARNs of unmanaged Linux worker nodes to grant access to the EKS cluster.
In earlier versions, this could be used to grant access to worker nodes of any type
that were not managed by the EKS cluster. Now EKS requires that unmanaged worker nodes
be classified as Linux or Windows servers, so this input is temporarily retained
with the assumption that all worker nodes are Linux servers. (It is likely that
earlier versions did not work properly with Windows worker nodes anyway.)
This input is deprecated and will be removed in a future release.
In the future, this component will either have a way to separate Linux and Windows worker nodes,
or drop support for unmanaged worker nodes entirely. | `list(string)` | `[]` | no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | -| [node\_group\_defaults](#input\_node\_group\_defaults) | Defaults for node groups in the cluster |
object({
ami_release_version = string
ami_type = string
attributes = list(string)
availability_zones = list(string) # set to null to use var.availability_zones
cluster_autoscaler_enabled = bool
create_before_destroy = bool
desired_group_size = number
disk_encryption_enabled = bool
disk_size = number
instance_types = list(string)
kubernetes_labels = map(string)
kubernetes_taints = list(object({
key = string
value = string
effect = string
}))
kubernetes_version = string # set to null to use cluster_kubernetes_version
max_group_size = number
min_group_size = number
resources_to_tag = list(string)
tags = map(string)
})
|
{
"ami_release_version": null,
"ami_type": null,
"attributes": null,
"availability_zones": null,
"cluster_autoscaler_enabled": true,
"create_before_destroy": true,
"desired_group_size": 1,
"disk_encryption_enabled": true,
"disk_size": 20,
"instance_types": [
"t3.medium"
],
"kubernetes_labels": null,
"kubernetes_taints": null,
"kubernetes_version": null,
"max_group_size": 100,
"min_group_size": null,
"resources_to_tag": null,
"tags": null
}
| no | -| [node\_groups](#input\_node\_groups) | List of objects defining a node group for the cluster |
map(object({
# EKS AMI version to use, e.g. "1.16.13-20200821" (no "v").
ami_release_version = string
# Type of Amazon Machine Image (AMI) associated with the EKS Node Group
ami_type = string
# Additional attributes (e.g. `1`) for the node group
attributes = list(string)
# will create 1 auto scaling group in each specified availability zone
availability_zones = list(string)
# Whether to enable Node Group to scale its AutoScaling Group
cluster_autoscaler_enabled = bool
# True to create new node_groups before deleting old ones, avoiding a temporary outage
create_before_destroy = bool
# Desired number of worker nodes when initially provisioned
desired_group_size = number
# Enable disk encryption for the created launch template (if we aren't provided with an existing launch template)
disk_encryption_enabled = bool
# Disk size in GiB for worker nodes. Terraform will only perform drift detection if a configuration value is provided.
disk_size = number
# Set of instance types associated with the EKS Node Group. Terraform will only perform drift detection if a configuration value is provided.
instance_types = list(string)
# Key-value mapping of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed
kubernetes_labels = map(string)
# List of objects describing Kubernetes taints.
kubernetes_taints = list(object({
key = string
value = string
effect = string
}))
# Desired Kubernetes master version. If you do not specify a value, the latest available version is used
kubernetes_version = string
# The maximum size of the AutoScaling Group
max_group_size = number
# The minimum size of the AutoScaling Group
min_group_size = number
# List of auto-launched resource types to tag
resources_to_tag = list(string)
tags = map(string)
}))
| `{}` | no | -| [oidc\_provider\_enabled](#input\_oidc\_provider\_enabled) | Create an IAM OIDC identity provider for the cluster, then you can create IAM roles to associate with a service account in the cluster, instead of using kiam or kube2iam. For more information, see https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html | `bool` | n/a | yes | -| [primary\_iam\_roles](#input\_primary\_iam\_roles) | Primary IAM roles to add to `aws-auth` ConfigMap |
list(object({
role = string
groups = list(string)
}))
| `[]` | no | +| [node\_group\_defaults](#input\_node\_group\_defaults) | Defaults for node groups in the cluster |
object({
ami_release_version = optional(string, null)
ami_type = optional(string, null)
attributes = optional(list(string), null)
availability_zones = optional(list(string)) # set to null to use var.availability_zones
cluster_autoscaler_enabled = optional(bool, null)
create_before_destroy = optional(bool, null)
desired_group_size = optional(number, null)
instance_types = optional(list(string), null)
kubernetes_labels = optional(map(string), {})
kubernetes_taints = optional(list(object({
key = string
value = string
effect = string
})), [])
node_userdata = optional(object({
before_cluster_joining_userdata = optional(string)
bootstrap_extra_args = optional(string)
kubelet_extra_args = optional(string)
after_cluster_joining_userdata = optional(string)
}), {})
kubernetes_version = optional(string, null) # set to null to use cluster_kubernetes_version
max_group_size = optional(number, null)
min_group_size = optional(number, null)
resources_to_tag = optional(list(string), null)
tags = optional(map(string), null)

# block_device_map copied from cloudposse/terraform-aws-eks-node-group
# Keep in sync via copy and paste, but make optional
# Most of the time you want "/dev/xvda". For BottleRocket, use "/dev/xvdb".
block_device_map = optional(map(object({
no_device = optional(bool, null)
virtual_name = optional(string, null)
ebs = optional(object({
delete_on_termination = optional(bool, true)
encrypted = optional(bool, true)
iops = optional(number, null)
kms_key_id = optional(string, null)
snapshot_id = optional(string, null)
throughput = optional(number, null) # for gp3, MiB/s, up to 1000
volume_size = optional(number, 50) # disk size in GB
volume_type = optional(string, "gp3")

# Catch common camel case typos. These have no effect, they just generate better errors.
# It would be nice to actually use these, but volumeSize in particular is a number here
# and in most places it is a string with a unit suffix (e.g. 20Gi)
# Without these defined, they would be silently ignored and the default values would be used instead,
# which is difficult to debug.
deleteOnTermination = optional(any, null)
kmsKeyId = optional(any, null)
snapshotId = optional(any, null)
volumeSize = optional(any, null)
volumeType = optional(any, null)
}))
})), null)

# DEPRECATED: disk_encryption_enabled is DEPRECATED, use `block_device_map` instead.
disk_encryption_enabled = optional(bool, null)
# DEPRECATED: disk_size is DEPRECATED, use `block_device_map` instead.
disk_size = optional(number, null)
})
|
{
"block_device_map": {
"/dev/xvda": {
"ebs": {
"encrypted": true,
"volume_size": 20,
"volume_type": "gp2"
}
}
},
"desired_group_size": 1,
"instance_types": [
"t3.medium"
],
"kubernetes_version": null,
"max_group_size": 100
}
| no | +| [node\_groups](#input\_node\_groups) | List of objects defining a node group for the cluster |
map(object({
# EKS AMI version to use, e.g. "1.16.13-20200821" (no "v").
ami_release_version = optional(string, null)
# Type of Amazon Machine Image (AMI) associated with the EKS Node Group
ami_type = optional(string, null)
# Additional attributes (e.g. `1`) for the node group
attributes = optional(list(string), null)
# will create 1 auto scaling group in each specified availability zone
# or all AZs with subnets if none are specified anywhere
availability_zones = optional(list(string), null)
# Whether to enable Node Group to scale its AutoScaling Group
cluster_autoscaler_enabled = optional(bool, null)
# True to create new node_groups before deleting old ones, avoiding a temporary outage
create_before_destroy = optional(bool, null)
# Desired number of worker nodes when initially provisioned
desired_group_size = optional(number, null)
# Set of instance types associated with the EKS Node Group. Terraform will only perform drift detection if a configuration value is provided.
instance_types = optional(list(string), null)
# Key-value mapping of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed
kubernetes_labels = optional(map(string), null)
# List of objects describing Kubernetes taints.
kubernetes_taints = optional(list(object({
key = string
value = string
effect = string
})), null)
node_userdata = optional(object({
before_cluster_joining_userdata = optional(string)
bootstrap_extra_args = optional(string)
kubelet_extra_args = optional(string)
after_cluster_joining_userdata = optional(string)
}), {})
# Desired Kubernetes master version. If you do not specify a value, the latest available version is used
kubernetes_version = optional(string, null)
# The maximum size of the AutoScaling Group
max_group_size = optional(number, null)
# The minimum size of the AutoScaling Group
min_group_size = optional(number, null)
# List of auto-launched resource types to tag
resources_to_tag = optional(list(string), null)
tags = optional(map(string), null)

# block_device_map copied from cloudposse/terraform-aws-eks-node-group
# Keep in sync via copy and paste, but make optional.
# Most of the time you want "/dev/xvda". For BottleRocket, use "/dev/xvdb".
block_device_map = optional(map(object({
no_device = optional(bool, null)
virtual_name = optional(string, null)
ebs = optional(object({
delete_on_termination = optional(bool, true)
encrypted = optional(bool, true)
iops = optional(number, null)
kms_key_id = optional(string, null)
snapshot_id = optional(string, null)
throughput = optional(number, null) # for gp3, MiB/s, up to 1000
volume_size = optional(number, 20) # Disk size in GB
volume_type = optional(string, "gp3")

# Catch common camel case typos. These have no effect, they just generate better errors.
# It would be nice to actually use these, but volumeSize in particular is a number here
# and in most places it is a string with a unit suffix (e.g. 20Gi)
# Without these defined, they would be silently ignored and the default values would be used instead,
# which is difficult to debug.
deleteOnTermination = optional(any, null)
kmsKeyId = optional(any, null)
snapshotId = optional(any, null)
volumeSize = optional(any, null)
volumeType = optional(any, null)
}))
})), null)

# DEPRECATED:
# Enable disk encryption for the created launch template (if we aren't provided with an existing launch template)
# DEPRECATED: disk_encryption_enabled is DEPRECATED, use `block_device_map` instead.
disk_encryption_enabled = optional(bool, null)
# Disk size in GiB for worker nodes. Terraform will only perform drift detection if a configuration value is provided.
# DEPRECATED: disk_size is DEPRECATED, use `block_device_map` instead.
disk_size = optional(number, null)

}))
| `{}` | no | +| [oidc\_provider\_enabled](#input\_oidc\_provider\_enabled) | Create an IAM OIDC identity provider for the cluster, then you can create IAM roles to associate with a service account in the cluster, instead of using kiam or kube2iam. For more information, see https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html | `bool` | `true` | no | | [public\_access\_cidrs](#input\_public\_access\_cidrs) | Indicates which CIDR blocks can access the Amazon EKS public API server endpoint when enabled. EKS defaults this to a list with 0.0.0.0/0. | `list(string)` |
[
"0.0.0.0/0"
]
| no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | @@ -162,11 +602,14 @@ components: | [subnet\_type\_tag\_key](#input\_subnet\_type\_tag\_key) | The tag used to find the private subnets to find by availability zone. If null, will be looked up in vpc outputs. | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [vpc\_component\_name](#input\_vpc\_component\_name) | The name of the vpc component | `string` | `"vpc"` | no | ## Outputs | Name | Description | |------|-------------| +| [availability\_zones](#output\_availability\_zones) | Availability Zones in which the cluster is provisioned | +| [eks\_addons\_versions](#output\_eks\_addons\_versions) | Map of enabled EKS Addons names and versions | | [eks\_auth\_worker\_roles](#output\_eks\_auth\_worker\_roles) | List of worker IAM roles that were included in the `auth-map` ConfigMap. | | [eks\_cluster\_arn](#output\_eks\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster | | [eks\_cluster\_certificate\_authority\_data](#output\_eks\_cluster\_certificate\_authority\_data) | The Kubernetes cluster certificate authority data | @@ -186,10 +629,17 @@ components: | [fargate\_profiles](#output\_fargate\_profiles) | Fargate Profiles | | [karpenter\_iam\_role\_arn](#output\_karpenter\_iam\_role\_arn) | Karpenter IAM Role ARN | | [karpenter\_iam\_role\_name](#output\_karpenter\_iam\_role\_name) | Karpenter IAM Role name | +| [vpc\_cidr](#output\_vpc\_cidr) | The CIDR of the VPC where this cluster is deployed. | + + +## Related How-to Guides + +- [EKS Foundational Platform](https://docs.cloudposse.com/layers/eks/) ## References -- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/eks/cluster) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/eks/cluster) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/eks/cluster/additional-addon-support.tf b/modules/eks/cluster/additional-addon-support.tf new file mode 100644 index 000000000..3fa6f4b40 --- /dev/null +++ b/modules/eks/cluster/additional-addon-support.tf @@ -0,0 +1,31 @@ +locals { + # If you have custom addons, create a file called `additional-addon-support_override.tf` + # and in that file override any of the following declarations as needed. + + + # Set `overridable_deploy_additional_addons_to_fargate` to indicate whether or not + # there are custom addons that should be deployed to Fargate on nodeless clusters. + overridable_deploy_additional_addons_to_fargate = false + + # `overridable_additional_addon_service_account_role_arn_map` is a map of addon names + # to the service account role ARNs they use. + # See the README for more details. + overridable_additional_addon_service_account_role_arn_map = { + # Example: + # my-addon = module.my_addon_eks_iam_role.service_account_role_arn + } + + # If you are creating Fargate profiles for your addons, + # use "cloudposse/eks-fargate-profile/aws" to create them + # and set `overridable_additional_addon_fargate_profiles` to a map of addon names + # to the corresponding eks-fargate-profile module output. + overridable_additional_addon_fargate_profiles = { + # Example: + # my-addon = module.my_addon_fargate_profile + } + + # If you have additional dependencies that must be created before the addons are deployed, + # override this declaration by creating a file called `additional-addon-support_override.tf` + # and setting `overridable_addons_depends_on` appropriately. 
+ overridable_addons_depends_on = [] +} diff --git a/modules/eks/cluster/addons.tf b/modules/eks/cluster/addons.tf new file mode 100644 index 000000000..078820a93 --- /dev/null +++ b/modules/eks/cluster/addons.tf @@ -0,0 +1,221 @@ +# https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html +# https://docs.aws.amazon.com/eks/latest/userguide/managing-add-ons.html#creating-an-add-on + +locals { + eks_cluster_oidc_issuer_url = local.enabled ? replace(module.eks_cluster.eks_cluster_identity_oidc_issuer, "https://", "") : "" + eks_cluster_id = local.enabled ? module.eks_cluster.eks_cluster_id : "" + + addon_names = [for k, v in var.addons : k if v.enabled] + vpc_cni_addon_enabled = local.enabled && contains(local.addon_names, "vpc-cni") + aws_ebs_csi_driver_enabled = local.enabled && contains(local.addon_names, "aws-ebs-csi-driver") + aws_efs_csi_driver_enabled = local.enabled && contains(local.addon_names, "aws-efs-csi-driver") + coredns_enabled = local.enabled && contains(local.addon_names, "coredns") + + # The `vpc-cni`, `aws-ebs-csi-driver`, and `aws-efs-csi-driver` addons are special as they always require an + # IAM role for Kubernetes Service Account (IRSA). The roles are created by this component unless ARNs are provided. + # Use "?" operator to avoid evaluating map lookup when entry is missing + vpc_cni_sa_needed = local.vpc_cni_addon_enabled ? lookup(var.addons["vpc-cni"], "service_account_role_arn", null) == null : false + ebs_csi_sa_needed = local.aws_ebs_csi_driver_enabled ? lookup(var.addons["aws-ebs-csi-driver"], "service_account_role_arn", null) == null : false + efs_csi_sa_needed = local.aws_efs_csi_driver_enabled ? lookup(var.addons["aws-efs-csi-driver"], "service_account_role_arn", null) == null : false + addon_service_account_role_arn_map = { + vpc-cni = module.vpc_cni_eks_iam_role.service_account_role_arn + aws-ebs-csi-driver = module.aws_ebs_csi_driver_eks_iam_role.service_account_role_arn + aws-efs-csi-driver = module.aws_efs_csi_driver_eks_iam_role.service_account_role_arn + } + + final_addon_service_account_role_arn_map = merge(local.addon_service_account_role_arn_map, local.overridable_additional_addon_service_account_role_arn_map) + + addons = [ + for k, v in var.addons : { + addon_name = k + addon_version = lookup(v, "addon_version", null) + configuration_values = lookup(v, "configuration_values", null) + resolve_conflicts_on_create = lookup(v, "resolve_conflicts_on_create", null) + resolve_conflicts_on_update = lookup(v, "resolve_conflicts_on_update", null) + service_account_role_arn = try(coalesce(lookup(v, "service_account_role_arn", null), lookup(local.final_addon_service_account_role_arn_map, k, null)), null) + create_timeout = lookup(v, "create_timeout", null) + update_timeout = lookup(v, "update_timeout", null) + delete_timeout = lookup(v, "delete_timeout", null) + + } if v.enabled + ] + + addons_depends_on = concat([ + module.vpc_cni_eks_iam_role, + module.coredns_fargate_profile, + module.aws_ebs_csi_driver_eks_iam_role, + module.aws_ebs_csi_driver_fargate_profile, + module.aws_efs_csi_driver_eks_iam_role, + ], local.overridable_addons_depends_on) + + addons_require_fargate = var.deploy_addons_to_fargate && ( + local.coredns_enabled || + local.aws_ebs_csi_driver_enabled || + # as of EFS add-on v1.5.8, it cannot be deployed to Fargate + # local.aws_efs_csi_driver_enabled || + local.overridable_deploy_additional_addons_to_fargate + ) + addon_fargate_profiles = merge( + (local.coredns_enabled && var.deploy_addons_to_fargate ? 
{ + coredns = one(module.coredns_fargate_profile[*]) + } : {}), + (local.aws_ebs_csi_driver_enabled && var.deploy_addons_to_fargate ? { + aws_ebs_csi_driver = one(module.aws_ebs_csi_driver_fargate_profile[*]) + } : {}), + # as of EFS add-on v1.5.8, it cannot be deployed to Fargate + # See https://github.com/kubernetes-sigs/aws-efs-csi-driver/issues/1100 + # (local.aws_efs_csi_driver_enabled && var.deploy_addons_to_fargate ? { + # aws_efs_csi_driver = one(module.aws_efs_csi_driver_fargate_profile[*]) + # } : {}), + local.overridable_additional_addon_fargate_profiles + ) +} + +# `vpc-cni` EKS addon +# https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html +# https://docs.aws.amazon.com/eks/latest/userguide/managing-vpc-cni.html +# https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-role +# https://aws.github.io/aws-eks-best-practices/networking/vpc-cni/#deploy-vpc-cni-managed-add-on +data "aws_iam_policy_document" "vpc_cni_ipv6" { + count = local.vpc_cni_sa_needed ? 1 : 0 + + # See https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-ipv6-policy + statement { + sid = "" + effect = "Allow" + resources = ["*"] + + actions = [ + "ec2:AssignIpv6Addresses", + "ec2:DescribeInstances", + "ec2:DescribeTags", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeInstanceTypes" + ] + } + + statement { + sid = "" + effect = "Allow" + resources = ["arn:aws:ec2:*:*:network-interface/*"] + actions = ["ec2:CreateTags"] + } +} + +resource "aws_iam_role_policy_attachment" "vpc_cni" { + count = local.vpc_cni_sa_needed ? 1 : 0 + + role = module.vpc_cni_eks_iam_role.service_account_role_name + policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" +} + +module "vpc_cni_eks_iam_role" { + source = "cloudposse/eks-iam-role/aws" + version = "2.1.1" + + enabled = local.vpc_cni_sa_needed + + eks_cluster_oidc_issuer_url = local.eks_cluster_oidc_issuer_url + + service_account_name = "aws-node" + service_account_namespace = "kube-system" + + aws_iam_policy_document = [one(data.aws_iam_policy_document.vpc_cni_ipv6[*].json)] + + context = module.this.context +} + +module "coredns_fargate_profile" { + count = local.coredns_enabled && var.deploy_addons_to_fargate ? 1 : 0 + + source = "cloudposse/eks-fargate-profile/aws" + version = "1.3.0" + + subnet_ids = local.private_subnet_ids + cluster_name = local.eks_cluster_id + kubernetes_namespace = "kube-system" + kubernetes_labels = { k8s-app = "kube-dns" } + permissions_boundary = var.fargate_profile_iam_role_permissions_boundary + iam_role_kubernetes_namespace_delimiter = var.fargate_profile_iam_role_kubernetes_namespace_delimiter + + fargate_profile_name = "${local.eks_cluster_id}-coredns" + fargate_pod_execution_role_enabled = false + fargate_pod_execution_role_arn = one(module.fargate_pod_execution_role[*].eks_fargate_pod_execution_role_arn) + + attributes = ["coredns"] + context = module.this.context +} + +# `aws-ebs-csi-driver` EKS addon +# https://docs.aws.amazon.com/eks/latest/userguide/csi-iam-role.html +# https://aws.amazon.com/blogs/containers/amazon-ebs-csi-driver-is-now-generally-available-in-amazon-eks-add-ons +# https://docs.aws.amazon.com/eks/latest/userguide/managing-ebs-csi.html#csi-iam-role +# https://github.com/kubernetes-sigs/aws-ebs-csi-driver +resource "aws_iam_role_policy_attachment" "aws_ebs_csi_driver" { + count = local.ebs_csi_sa_needed ? 
1 : 0 + + role = module.aws_ebs_csi_driver_eks_iam_role.service_account_role_name + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy" +} + +module "aws_ebs_csi_driver_eks_iam_role" { + source = "cloudposse/eks-iam-role/aws" + version = "2.1.1" + + enabled = local.ebs_csi_sa_needed + + eks_cluster_oidc_issuer_url = local.eks_cluster_oidc_issuer_url + + service_account_name = "ebs-csi-controller-sa" + service_account_namespace = "kube-system" + + context = module.this.context +} + +module "aws_ebs_csi_driver_fargate_profile" { + count = local.aws_ebs_csi_driver_enabled && var.deploy_addons_to_fargate ? 1 : 0 + + source = "cloudposse/eks-fargate-profile/aws" + version = "1.3.0" + + subnet_ids = local.private_subnet_ids + cluster_name = local.eks_cluster_id + kubernetes_namespace = "kube-system" + kubernetes_labels = { app = "ebs-csi-controller" } # Only deploy the controller to Fargate, not the node driver + permissions_boundary = var.fargate_profile_iam_role_permissions_boundary + + iam_role_kubernetes_namespace_delimiter = var.fargate_profile_iam_role_kubernetes_namespace_delimiter + + fargate_profile_name = "${local.eks_cluster_id}-ebs-csi" + fargate_pod_execution_role_enabled = false + fargate_pod_execution_role_arn = one(module.fargate_pod_execution_role[*].eks_fargate_pod_execution_role_arn) + + attributes = ["ebs-csi"] + context = module.this.context +} + +# `aws-efs-csi-driver` EKS addon +# https://docs.aws.amazon.com/eks/latest/userguide/efs-csi.html +# https://github.com/kubernetes-sigs/aws-efs-csi-driver +resource "aws_iam_role_policy_attachment" "aws_efs_csi_driver" { + count = local.efs_csi_sa_needed ? 1 : 0 + + role = module.aws_efs_csi_driver_eks_iam_role.service_account_role_name + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEFSCSIDriverPolicy" +} + +module "aws_efs_csi_driver_eks_iam_role" { + source = "cloudposse/eks-iam-role/aws" + version = "2.1.1" + + enabled = local.efs_csi_sa_needed + + eks_cluster_oidc_issuer_url = local.eks_cluster_oidc_issuer_url + + service_account_namespace_name_list = [ + "kube-system:efs-csi-controller-sa", + "kube-system:efs-csi-node-sa", + ] + + context = module.this.context +} diff --git a/modules/eks/cluster/aws-sso.tf b/modules/eks/cluster/aws-sso.tf new file mode 100644 index 000000000..5e2eaf36f --- /dev/null +++ b/modules/eks/cluster/aws-sso.tf @@ -0,0 +1,26 @@ +# This is split off into a separate file in the hopes we can drop it altogether in the future, +# or else move it into `roles-to-principals`. + +locals { + + aws_sso_access_entry_map = { + for role in var.aws_sso_permission_sets_rbac : tolist(data.aws_iam_roles.sso_roles[role.aws_sso_permission_set].arns)[0] => { + kubernetes_groups = role.groups + } + } +} + +data "aws_iam_roles" "sso_roles" { + for_each = toset(var.aws_sso_permission_sets_rbac[*].aws_sso_permission_set) + name_regex = format("AWSReservedSSO_%s_.*", each.value) + path_prefix = "/aws-reserved/sso.amazonaws.com/" + + lifecycle { + postcondition { + condition = length(self.arns) == 1 + error_message = length(self.arns) == 0 ? 
"Could not find Role ARN for the AWS SSO permission set: ${each.value}" : ( + "Found more than one (${length(self.arns)}) Role ARN for the AWS SSO permission set: ${each.value}" + ) + } + } +} diff --git a/modules/eks/cluster/eks-node-groups.tf b/modules/eks/cluster/eks-node-groups.tf index f061f7cf1..a0d43ea77 100644 --- a/modules/eks/cluster/eks-node-groups.tf +++ b/modules/eks/cluster/eks-node-groups.tf @@ -1,7 +1,7 @@ locals { node_groups_enabled = local.enabled && var.managed_node_groups_enabled - node_group_default_availability_zones = var.node_group_defaults.availability_zones == null ? var.availability_zones : var.node_group_defaults.availability_zones + node_group_default_availability_zones = var.node_group_defaults.availability_zones == null ? local.availability_zones : var.node_group_defaults.availability_zones node_group_default_kubernetes_version = var.node_group_defaults.kubernetes_version == null ? var.cluster_kubernetes_version : var.node_group_defaults.kubernetes_version # values(module.region_node_group) is an array of `region_node_group` objects @@ -21,7 +21,9 @@ module "region_node_group" { source = "./modules/node_group_by_region" availability_zones = each.value.availability_zones == null ? local.node_group_default_availability_zones : each.value.availability_zones - attributes = flatten(concat(var.attributes, [each.key], [var.color], each.value.attributes == null ? var.node_group_defaults.attributes : each.value.attributes)) + attributes = flatten(concat(var.attributes, [each.key], [ + var.color + ], each.value.attributes == null ? var.node_group_defaults.attributes : each.value.attributes)) node_group_size = module.this.enabled ? { desired_size = each.value.desired_group_size == null ? var.node_group_defaults.desired_group_size : each.value.desired_group_size @@ -38,23 +40,96 @@ module "region_node_group" { ami_type = each.value.ami_type == null ? var.node_group_defaults.ami_type : each.value.ami_type az_abbreviation_type = var.availability_zone_abbreviation_type cluster_autoscaler_enabled = each.value.cluster_autoscaler_enabled == null ? var.node_group_defaults.cluster_autoscaler_enabled : each.value.cluster_autoscaler_enabled - cluster_name = module.eks_cluster.eks_cluster_id + cluster_name = local.eks_cluster_id create_before_destroy = each.value.create_before_destroy == null ? var.node_group_defaults.create_before_destroy : each.value.create_before_destroy - disk_encryption_enabled = each.value.disk_encryption_enabled == null ? var.node_group_defaults.disk_encryption_enabled : each.value.disk_encryption_enabled - disk_size = each.value.disk_size == null ? var.node_group_defaults.disk_size : each.value.disk_size instance_types = each.value.instance_types == null ? var.node_group_defaults.instance_types : each.value.instance_types kubernetes_labels = each.value.kubernetes_labels == null ? var.node_group_defaults.kubernetes_labels : each.value.kubernetes_labels kubernetes_taints = each.value.kubernetes_taints == null ? var.node_group_defaults.kubernetes_taints : each.value.kubernetes_taints + node_userdata = each.value.node_userdata == null ? var.node_group_defaults.node_userdata : each.value.node_userdata kubernetes_version = each.value.kubernetes_version == null ? local.node_group_default_kubernetes_version : each.value.kubernetes_version resources_to_tag = each.value.resources_to_tag == null ? 
var.node_group_defaults.resources_to_tag : each.value.resources_to_tag subnet_type_tag_key = local.subnet_type_tag_key aws_ssm_agent_enabled = var.aws_ssm_agent_enabled vpc_id = local.vpc_id - # See "Ensure ordering of resource creation" comment above for explanation - # of "module_depends_on" - module_depends_on = module.eks_cluster.kubernetes_config_map_id + block_device_map = lookup(local.legacy_converted_block_device_map, each.key, local.block_device_map_w_defaults[each.key]) } : null context = module.this.context } + +## Warn if you are using camelCase in the `block_device_map` argument. +## Without this warning, camelCase inputs will be silently ignored and replaced with defaults, +## which is very hard to notice and debug. +# +## We just need some kind of data source or resource to trigger the warning. +## Because we need it to run for each node group, there are no good options +## among actually useful data sources or resources. We also have to ensure +## that Terraform updates it when the `block_device_map` argument changes, +## and does not skip the checks because it can use the cached value. +resource "random_pet" "camel_case_warning" { + for_each = local.node_groups_enabled ? var.node_groups : {} + + keepers = { + hash = base64sha256(jsonencode(local.block_device_map_w_defaults[each.key])) + } + + lifecycle { + precondition { + condition = length(compact(flatten([for device_name, device_map in local.block_device_map_w_defaults[each.key] : [ + lookup(device_map.ebs, "volumeSize", null), + lookup(device_map.ebs, "volumeType", null), + lookup(device_map.ebs, "kmsKeyId", null), + lookup(device_map.ebs, "deleteOnTermination", null), + lookup(device_map.ebs, "snapshotId", null), + ] + ]))) == 0 + error_message = <<-EOT + The `block_device_map` argument in the `node_groups[${each.key}]` module + does not support the `volumeSize`, `volumeType`, `kmsKeyId`, `deleteOnTermination`, or `snapshotId` arguments. + Please use `volume_size`, `volume_type`, `kms_key_id`, `delete_on_termination`, and `snapshot_id` instead." + EOT + } + } +} + +# DEPRECATION SUPPORT +# `disk_size` and `disk_encryption_enabled are deprecated in favor of `block_device_map`. +# Convert legacy use to new format. + +locals { + legacy_disk_inputs = { + for k, v in(local.node_groups_enabled ? var.node_groups : {}) : k => { + disk_encryption_enabled = v.disk_encryption_enabled == null ? var.node_group_defaults.disk_encryption_enabled : v.disk_encryption_enabled + disk_size = v.disk_size == null ? var.node_group_defaults.disk_size : v.disk_size + } if( + ((v.disk_encryption_enabled == null ? var.node_group_defaults.disk_encryption_enabled : v.disk_encryption_enabled) != null) + || ((v.disk_size == null ? var.node_group_defaults.disk_size : v.disk_size) != null) + ) + } + + legacy_converted_block_device_map = { + for k, v in local.legacy_disk_inputs : k => { + "/dev/xvda" = { + no_device = null + virtual_name = null + ebs = { + delete_on_termination = true + encrypted = v.disk_encryption_enabled + iops = null + kms_key_id = null + snapshot_id = null + throughput = null + volume_size = v.disk_size + volume_type = "gp2" + } # ebs + } # "/dev/xvda" + } # k => { "/dev/xvda" = { ... } } + } + + block_device_map_w_defaults = { + for k, v in(local.node_groups_enabled ? var.node_groups : {}) : k => + v.block_device_map == null ? 
var.node_group_defaults.block_device_map : v.block_device_map + } + +} diff --git a/modules/eks/cluster/fargate-profiles.tf b/modules/eks/cluster/fargate-profiles.tf index d0eb07857..17e494572 100644 --- a/modules/eks/cluster/fargate-profiles.tf +++ b/modules/eks/cluster/fargate-profiles.tf @@ -1,19 +1,49 @@ locals { - fargate_profiles = local.enabled ? var.fargate_profiles : {} + fargate_profiles = local.enabled ? var.fargate_profiles : {} + fargate_cluster_pod_execution_role_name = "${local.eks_cluster_id}-fargate" + fargate_cluster_pod_execution_role_needed = local.enabled && ( + local.addons_require_fargate || + ((length(var.fargate_profiles) > 0) && !var.legacy_fargate_1_role_per_profile_enabled) + ) } +module "fargate_pod_execution_role" { + count = local.fargate_cluster_pod_execution_role_needed ? 1 : 0 + + source = "cloudposse/eks-fargate-profile/aws" + version = "1.3.0" + + subnet_ids = local.private_subnet_ids + cluster_name = local.eks_cluster_id + permissions_boundary = var.fargate_profile_iam_role_permissions_boundary + + fargate_profile_enabled = false + fargate_pod_execution_role_enabled = true + fargate_pod_execution_role_name = local.fargate_cluster_pod_execution_role_name + + context = module.this.context +} + + +############################################################################### +### Both New and Legacy behavior, use caution when modifying +############################################################################### module "fargate_profile" { source = "cloudposse/eks-fargate-profile/aws" - version = "1.1.0" + version = "1.3.0" for_each = local.fargate_profiles subnet_ids = local.private_subnet_ids - cluster_name = module.eks_cluster.eks_cluster_id + cluster_name = local.eks_cluster_id kubernetes_namespace = each.value.kubernetes_namespace kubernetes_labels = each.value.kubernetes_labels permissions_boundary = var.fargate_profile_iam_role_permissions_boundary iam_role_kubernetes_namespace_delimiter = var.fargate_profile_iam_role_kubernetes_namespace_delimiter + ## Legacy switch + fargate_pod_execution_role_enabled = var.legacy_fargate_1_role_per_profile_enabled + fargate_pod_execution_role_arn = var.legacy_fargate_1_role_per_profile_enabled ? null : one(module.fargate_pod_execution_role[*].eks_fargate_pod_execution_role_arn) + context = module.this.context } diff --git a/modules/eks/cluster/karpenter.tf b/modules/eks/cluster/karpenter.tf index 46cec385c..b57efe889 100644 --- a/modules/eks/cluster/karpenter.tf +++ b/modules/eks/cluster/karpenter.tf @@ -13,6 +13,8 @@ locals { karpenter_iam_role_enabled = local.enabled && var.karpenter_iam_role_enabled + karpenter_instance_profile_enabled = local.karpenter_iam_role_enabled && !var.legacy_do_not_create_karpenter_instance_profile + # Used to determine correct partition (i.e. - `aws`, `aws-gov`, `aws-cn`, etc.) partition = one(data.aws_partition.current[*].partition) } @@ -55,6 +57,14 @@ resource "aws_iam_role" "karpenter" { tags = module.karpenter_label.tags } +resource "aws_iam_instance_profile" "default" { + count = local.karpenter_instance_profile_enabled ? 1 : 0 + + name = one(aws_iam_role.karpenter[*].name) + role = one(aws_iam_role.karpenter[*].name) + tags = module.karpenter_label.tags +} + # AmazonSSMManagedInstanceCore policy is required by Karpenter resource "aws_iam_role_policy_attachment" "amazon_ssm_managed_instance_core" { count = local.karpenter_iam_role_enabled ? 
1 : 0 diff --git a/modules/eks/cluster/main.tf b/modules/eks/cluster/main.tf index 8a1a80d95..c9677bb9b 100644 --- a/modules/eks/cluster/main.tf +++ b/modules/eks/cluster/main.tf @@ -1,65 +1,79 @@ locals { - enabled = module.this.enabled - primary_role_map = module.team_roles.outputs.team_name_role_arn_map - delegated_role_map = module.delegated_roles.outputs.role_name_role_arn_map - eks_outputs = module.eks.outputs - vpc_outputs = module.vpc.outputs - - attributes = flatten(concat(module.this.attributes, [var.color])) - public_subnet_ids = local.vpc_outputs.public_subnet_ids - private_subnet_ids = local.vpc_outputs.private_subnet_ids - vpc_id = local.vpc_outputs.vpc_id - - iam_primary_roles_tenant_name = coalesce(var.iam_primary_roles_tenant_name, module.this.tenant) - - primary_iam_roles = [for role in var.primary_iam_roles : { - rolearn = local.primary_role_map[role.role] - username = module.this.context.tenant != null ? format("%s-identity-%s", local.iam_primary_roles_tenant_name, role.role) : format("identity-%s", role.role) - groups = role.groups - }] + enabled = module.this.enabled + vpc_outputs = module.vpc.outputs + + attributes = flatten(concat(module.this.attributes, [var.color])) + + this_account_name = module.iam_roles.current_account_account_name - delegated_iam_roles = [for role in var.delegated_iam_roles : { - rolearn = local.delegated_role_map[role.role] - username = module.this.context.tenant != null ? format("%s-%s-%s", module.this.tenant, module.this.stage, role.role) : format("%s-%s", module.this.stage, role.role) - groups = role.groups + role_map = { (local.this_account_name) = var.aws_team_roles_rbac[*].aws_team_role } + + aws_team_roles_auth = [for role in var.aws_team_roles_rbac : { + rolearn = module.iam_arns.principals_map[local.this_account_name][role.aws_team_role] + groups = role.groups }] - # Existing Fargate Profile role ARNs - fargate_profile_role_arns = local.eks_outputs.fargate_profile_role_arns - - map_fargate_profile_roles = [ - for role_arn in local.fargate_profile_role_arns : { - rolearn : role_arn - username : "system:node:{{SessionName}}" - groups : [ - "system:bootstrappers", - "system:nodes", - # `system:node-proxier` is required by Fargate (and it's added automatically to the `aws-auth` ConfigMap when a Fargate Profile gets created, so we need to add it back) - # Allows access to the resources required by the `kube-proxy` component - # https://kubernetes.io/docs/reference/access-authn-authz/rbac/ - "system:node-proxier" - ] + aws_team_roles_access_entry_map = { + for role in local.aws_team_roles_auth : role.rolearn => { + kubernetes_groups = role.groups + } + } + + ## For future reference, as we enhance support for EKS Policies + ## and namespace limits, here are some examples of entries: + # access_entry_map = { + # "arn:aws:iam:::role/prefix-admin" = { + # access_policy_associations = { + # ClusterAdmin = {} + # } + # } + # "arn:aws:iam:::role/prefix-observer" = { + # kubernetes_groups = ["view"] + # } + # } + # + # access_entry_map = merge({ for role in local.aws_team_roles_auth : role.rolearn => { + # kubernetes_groups = role.groups + # } }, {for role in module.eks_workers[*].workers_role_arn : role => { + # type = "EC2_LINUX" + # }}) + + iam_roles_access_entry_map = { + for role in var.map_additional_iam_roles : role.rolearn => { + kubernetes_groups = role.groups } - ] + } - map_additional_iam_roles = concat( - local.primary_iam_roles, - local.delegated_iam_roles, - var.map_additional_iam_roles, - local.map_fargate_profile_roles, - ) + 
iam_users_access_entry_map = { + for role in var.map_additional_iam_users : role.rolearn => { + kubernetes_groups = role.groups + } + } - # Existing managed worker role ARNs - managed_worker_role_arns = local.eks_outputs.eks_managed_node_workers_role_arns + access_entry_map = merge(local.aws_team_roles_access_entry_map, local.aws_sso_access_entry_map, local.iam_roles_access_entry_map, local.iam_users_access_entry_map) - # If Karpenter IAM role is enabled, add it to the `aws-auth` ConfigMap to allow the nodes launched by Karpenter to join the EKS cluster + # If Karpenter IAM role is enabled, give it access to the cluster to allow the nodes launched by Karpenter to join the EKS cluster karpenter_role_arn = one(aws_iam_role.karpenter[*].arn) - worker_role_arns = compact(concat( + linux_worker_role_arns = local.enabled ? concat( var.map_additional_worker_roles, - local.managed_worker_role_arns, - [local.karpenter_role_arn] - )) + # As of Karpenter v0.35.0, there is no entry in the official Karpenter documentation + # stating how to configure Karpenter node roles via EKS Access Entries. + # However, it is launching unmanaged worker nodes, so it makes sense that they + # be configured as EC2_LINUX unmanaged worker nodes. Of course, this probably + # does not work if they are Windows nodes, but at the moment, this component + # probably has other deficiencies that would prevent it from working with Windows nodes, + # so we will stick with just saying Windows is not supported until we have some need for it. + local.karpenter_iam_role_enabled ? [local.karpenter_role_arn] : [], + ) : [] + + # For backwards compatibility, we need to add the unmanaged worker role ARNs, but + # historically we did not care whether they were LINUX or WINDOWS. + # Best we can do is guess that they are LINUX. The `eks-cluster` module + # did not give them all the support needed to run Windows anyway. + access_entries_for_nodes = length(local.linux_worker_role_arns) > 0 ? { + EC2_LINUX = local.linux_worker_role_arns + } : {} subnet_type_tag_key = var.subnet_type_tag_key != null ? var.subnet_type_tag_key : local.vpc_outputs.vpc.subnet_type_tag_key @@ -70,78 +84,110 @@ locals { module.vpc_ingress[k].outputs.vpc_cidr ] ) + + vpc_id = local.vpc_outputs.vpc_id + + availability_zones_expanded = local.enabled && length(var.availability_zones) > 0 && length(var.availability_zone_ids) == 0 ? ( + (substr( + var.availability_zones[0], + 0, + length(var.region) + ) == var.region) ? var.availability_zones : formatlist("${var.region}%s", var.availability_zones) + ) : [] + + short_region = module.utils.region_az_alt_code_maps["to_short"][var.region] + + availability_zone_ids_expanded = local.enabled && length(var.availability_zone_ids) > 0 ? ( + (substr( + var.availability_zone_ids[0], + 0, + length(local.short_region) + ) == local.short_region) ? var.availability_zone_ids : formatlist("${local.short_region}%s", var.availability_zone_ids) + ) : [] + + # Create a map of AZ IDs to AZ names (and the reverse), + # but fail safely, because AZ IDs are not always available. + az_id_map = length(local.availability_zone_ids_expanded) > 0 ? try(zipmap(data.aws_availability_zones.default[0].zone_ids, data.aws_availability_zones.default[0].names), {}) : {} + + availability_zones_normalized = length(local.availability_zone_ids_expanded) > 0 ? 
[ + for v in local.availability_zone_ids_expanded : local.az_id_map[v] + ] : local.availability_zones_expanded + + # Get only the public subnets that correspond to the AZs provided in `var.availability_zones` + # `az_public_subnets_map` is a map of AZ names to list of public subnet IDs in the AZs + # LEGACY SUPPORT for legacy VPC with no az_public_subnets_map + public_subnet_ids = try(flatten([ + for k, v in local.vpc_outputs.az_public_subnets_map : v + if contains(var.availability_zones, k) || length(var.availability_zones) == 0 + ]), + local.vpc_outputs.public_subnet_ids) + + # Get only the private subnets that correspond to the AZs provided in `var.availability_zones` + # `az_private_subnets_map` is a map of AZ names to list of private subnet IDs in the AZs + # LEGACY SUPPORT for legacy VPC with no az_public_subnets_map + private_subnet_ids = try(flatten([ + for k, v in local.vpc_outputs.az_private_subnets_map : v + if contains(var.availability_zones, k) || length(var.availability_zones) == 0 + ]), + local.vpc_outputs.private_subnet_ids) + + # Infer the availability zones from the private subnets if var.availability_zones is empty: + availability_zones = local.enabled ? (length(local.availability_zones_normalized) == 0 ? keys(local.vpc_outputs.az_private_subnets_map) : local.availability_zones_normalized) : [] +} + +data "aws_availability_zones" "default" { + count = length(local.availability_zone_ids_expanded) > 0 ? 1 : 0 + + # Filter out Local Zones. See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones#by-filter + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } + + lifecycle { + postcondition { + condition = length(self.zone_ids) > 0 + error_message = "No availability zones IDs found in region ${var.region}. You must specify availability zones instead." + } + } +} + +module "utils" { + source = "cloudposse/utils/aws" + version = "1.3.0" } module "eks_cluster" { source = "cloudposse/eks-cluster/aws" - version = "2.5.0" + version = "4.1.0" region = var.region attributes = local.attributes - kube_data_auth_enabled = false - # exec_auth is more reliable than data_auth when the aws CLI is available - # Details at https://github.com/cloudposse/terraform-aws-eks-cluster/releases/tag/0.42.0 - kube_exec_auth_enabled = !var.kubeconfig_file_enabled - # If using `exec` method (recommended) for authentication, provide an explict - # IAM role ARN to exec as for authentication to EKS cluster. 
- kube_exec_auth_role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) - kube_exec_auth_role_arn_enabled = true - # Path to KUBECONFIG file to use to access the EKS cluster - kubeconfig_path = var.kubeconfig_file - kubeconfig_path_enabled = var.kubeconfig_file_enabled - - allowed_security_groups = var.allowed_security_groups + access_config = var.access_config + access_entry_map = local.access_entry_map + access_entries_for_nodes = local.access_entries_for_nodes + + + allowed_security_group_ids = var.allowed_security_groups allowed_cidr_blocks = local.allowed_cidr_blocks - apply_config_map_aws_auth = var.apply_config_map_aws_auth cluster_log_retention_period = var.cluster_log_retention_period enabled_cluster_log_types = var.enabled_cluster_log_types endpoint_private_access = var.cluster_endpoint_private_access endpoint_public_access = var.cluster_endpoint_public_access kubernetes_version = var.cluster_kubernetes_version oidc_provider_enabled = var.oidc_provider_enabled - map_additional_aws_accounts = var.map_additional_aws_accounts - map_additional_iam_roles = local.map_additional_iam_roles - map_additional_iam_users = var.map_additional_iam_users public_access_cidrs = var.public_access_cidrs subnet_ids = var.cluster_private_subnets_only ? local.private_subnet_ids : concat(local.private_subnet_ids, local.public_subnet_ids) - vpc_id = local.vpc_id - kubernetes_config_map_ignore_role_changes = false - # Managed Node Groups do not expose nor accept any Security Groups. - # Instead, EKS creates a Security Group and applies it to ENI that is attached to EKS Control Plane master nodes and to any managed workloads. - #workers_security_group_ids = compact([local.vpn_allowed_cidr_sg]) + # EKS addons + addons = local.addons - # Ensure ordering of resource creation: - # 1. Create the EKS cluster - # 2. Create any resources OTHER THAN MANAGED NODE GROUPS that need to be added to the - # Kubernetes `aws-auth` configMap by our Terraform - # 3. Use Terraform to create the Kubernetes `aws-auth` configMap we need - # 4. Create managed node groups. AWS EKS will automatically add newly created - # managed node groups to the Kubernetes `aws-auth` configMap. - # - # We must execute steps in this order because: - # - 1 before 3 because we cannot add a configMap to a cluster that does not exist - # - 2 before 3 because Terraform will not create and update the configMap in separate steps, so it must have - # all the data to add before it creates the configMap - # - 3 before 4 because EKS will create the Kubernetes `aws-auth` configMap if it does not exist - # when it creates the first managed node group, and Terraform will not modify a resource it did not create - # - # We count on the EKS cluster module to ensure steps 1-3 are done in the right order. - # We then depend on the kubernetes_config_map_id, using the `module_depends_on` feature of the node-group module, - # to ensure we do not proceed to step 4 until after step 3 is completed. - - # workers_role_arns is part of the data that needs to be collected/created in step 2 above - # because it goes into the `aws-auth` configMap created in step 3. However, because of the - # ordering requirements, we cannot wait for new managed node groups to be created. Fortunately, - # this is not necessary, because AWS EKS will automatically add node groups to the `aws-auth` configMap - # when they are created. However, after they are created, they will not be replaced if they are - # later removed, and in step 3 we replace the entire configMap. 
So we have to add the pre-existing - # managed node groups here, and we get that by reading our current (pre plan or apply) Terraform state. - workers_role_arns = local.worker_role_arns - - aws_auth_yaml_strip_quotes = var.aws_auth_yaml_strip_quotes + addons_depends_on = var.addons_depends_on ? concat( + [module.region_node_group], local.addons_depends_on, + values(local.final_addon_service_account_role_arn_map) + ) : null cluster_encryption_config_enabled = var.cluster_encryption_config_enabled cluster_encryption_config_kms_key_id = var.cluster_encryption_config_kms_key_id @@ -152,4 +198,3 @@ module "eks_cluster" { context = module.this.context } - diff --git a/modules/eks/cluster/modules/node_group_by_az/main.tf b/modules/eks/cluster/modules/node_group_by_az/main.tf index 080e6ccdc..8172f0a05 100644 --- a/modules/eks/cluster/modules/node_group_by_az/main.tf +++ b/modules/eks/cluster/modules/node_group_by_az/main.tf @@ -18,7 +18,7 @@ data "aws_subnets" "private" { module "az_abbreviation" { source = "cloudposse/utils/aws" - version = "1.1.0" + version = "1.3.0" } locals { @@ -28,11 +28,17 @@ locals { subnet_ids = local.subnet_ids_test[0] == local.sentinel ? null : local.subnet_ids_test az_map = var.cluster_context.az_abbreviation_type == "short" ? module.az_abbreviation.region_az_alt_code_maps.to_short : module.az_abbreviation.region_az_alt_code_maps.to_fixed az_attribute = local.az_map[var.availability_zone] + + before_cluster_joining_userdata = var.cluster_context.node_userdata.before_cluster_joining_userdata != null ? [trimspace(var.cluster_context.node_userdata.before_cluster_joining_userdata)] : [] + bootstrap_extra_args = var.cluster_context.node_userdata.bootstrap_extra_args != null ? [trimspace(var.cluster_context.node_userdata.bootstrap_extra_args)] : [] + kubelet_extra_args = var.cluster_context.node_userdata.kubelet_extra_args != null ? [trimspace(var.cluster_context.node_userdata.kubelet_extra_args)] : [] + after_cluster_joining_userdata = var.cluster_context.node_userdata.after_cluster_joining_userdata != null ? [trimspace(var.cluster_context.node_userdata.after_cluster_joining_userdata)] : [] + } module "eks_node_group" { source = "cloudposse/eks-node-group/aws" - version = "2.6.0" + version = "3.0.1" enabled = local.enabled @@ -57,13 +63,14 @@ module "eks_node_group" { resources_to_tag = local.enabled ? var.cluster_context.resources_to_tag : null subnet_ids = local.enabled ? local.subnet_ids : null - block_device_mappings = local.enabled ? [{ - device_name = "/dev/xvda" - volume_size = var.cluster_context.disk_size - volume_type = "gp2" - encrypted = var.cluster_context.disk_encryption_enabled - delete_on_termination = true - }] : [] + # node_userdata + before_cluster_joining_userdata = local.enabled ? local.before_cluster_joining_userdata : [] + bootstrap_additional_options = local.enabled ? local.bootstrap_extra_args : [] + kubelet_additional_options = local.enabled ? local.kubelet_extra_args : [] + after_cluster_joining_userdata = local.enabled ? local.after_cluster_joining_userdata : [] + + + block_device_map = local.enabled ? 
var.cluster_context.block_device_map : null # Prevent the node groups from being created before the Kubernetes aws-auth configMap module_depends_on = var.cluster_context.module_depends_on diff --git a/modules/eks/cluster/modules/node_group_by_az/variables.tf b/modules/eks/cluster/modules/node_group_by_az/variables.tf index adcb1ba0b..a167d6ae1 100644 --- a/modules/eks/cluster/modules/node_group_by_az/variables.tf +++ b/modules/eks/cluster/modules/node_group_by_az/variables.tf @@ -20,21 +20,45 @@ variable "cluster_context" { cluster_autoscaler_enabled = bool cluster_name = string create_before_destroy = bool - disk_encryption_enabled = bool - disk_size = number - instance_types = list(string) - kubernetes_labels = map(string) + # Obsolete, replaced by block_device_map + # disk_encryption_enabled = bool + # disk_size = number + instance_types = list(string) + kubernetes_labels = map(string) kubernetes_taints = list(object({ key = string value = string effect = string })) + node_userdata = object({ + before_cluster_joining_userdata = optional(string) + bootstrap_extra_args = optional(string) + kubelet_extra_args = optional(string) + after_cluster_joining_userdata = optional(string) + }) kubernetes_version = string module_depends_on = any resources_to_tag = list(string) subnet_type_tag_key = string aws_ssm_agent_enabled = bool vpc_id = string + + # block_device_map copied from cloudposse/terraform-aws-eks-node-group + # Really, nothing is optional, but easier to keep in sync via copy and paste + block_device_map = map(object({ + no_device = optional(bool, null) + virtual_name = optional(string, null) + ebs = optional(object({ + delete_on_termination = optional(bool, true) + encrypted = optional(bool, true) + iops = optional(number, null) + kms_key_id = optional(string, null) + snapshot_id = optional(string, null) + throughput = optional(number, null) + volume_size = optional(number, 20) + volume_type = optional(string, "gp3") + })) + })) }) description = "The common settings for all node groups." } diff --git a/modules/eks/cluster/modules/node_group_by_region/main.tf b/modules/eks/cluster/modules/node_group_by_region/main.tf index 9f5c3f9ea..a14a3c7c1 100644 --- a/modules/eks/cluster/modules/node_group_by_region/main.tf +++ b/modules/eks/cluster/modules/node_group_by_region/main.tf @@ -3,7 +3,6 @@ locals { az_list = tolist(local.az_set) } - module "node_group" { for_each = module.this.enabled ? 
local.az_set : [] diff --git a/modules/eks/cluster/modules/node_group_by_region/variables.tf b/modules/eks/cluster/modules/node_group_by_region/variables.tf index b413c2181..7b902c186 100644 --- a/modules/eks/cluster/modules/node_group_by_region/variables.tf +++ b/modules/eks/cluster/modules/node_group_by_region/variables.tf @@ -21,21 +21,46 @@ variable "cluster_context" { cluster_autoscaler_enabled = bool cluster_name = string create_before_destroy = bool - disk_encryption_enabled = bool - disk_size = number - instance_types = list(string) - kubernetes_labels = map(string) + # Obsolete, replaced by block_device_map + # disk_encryption_enabled = bool + # disk_size = number + instance_types = list(string) + kubernetes_labels = map(string) kubernetes_taints = list(object({ key = string value = string effect = string })) + node_userdata = object({ + before_cluster_joining_userdata = optional(string) + bootstrap_extra_args = optional(string) + kubelet_extra_args = optional(string) + after_cluster_joining_userdata = optional(string) + }) kubernetes_version = string - module_depends_on = any + module_depends_on = optional(any) resources_to_tag = list(string) subnet_type_tag_key = string aws_ssm_agent_enabled = bool vpc_id = string + + # block_device_map copied from cloudposse/terraform-aws-eks-node-group + # Really, nothing is optional, but easier to keep in sync via copy and paste + block_device_map = map(object({ + no_device = optional(bool, null) + virtual_name = optional(string, null) + ebs = optional(object({ + delete_on_termination = optional(bool, true) + encrypted = optional(bool, true) + iops = optional(number, null) + kms_key_id = optional(string, null) + snapshot_id = optional(string, null) + throughput = optional(number, null) + volume_size = optional(number, 20) + volume_type = optional(string, "gp3") + })) + })) + }) description = "The common settings for all node groups." } diff --git a/modules/eks/cluster/outputs.tf b/modules/eks/cluster/outputs.tf index e32789647..186669f3e 100644 --- a/modules/eks/cluster/outputs.tf +++ b/modules/eks/cluster/outputs.tf @@ -60,7 +60,7 @@ output "eks_node_group_role_names" { output "eks_auth_worker_roles" { description = "List of worker IAM roles that were included in the `auth-map` ConfigMap." - value = local.worker_role_arns + value = local.linux_worker_role_arns } output "eks_node_group_statuses" { @@ -80,15 +80,35 @@ output "karpenter_iam_role_name" { output "fargate_profiles" { description = "Fargate Profiles" - value = module.fargate_profile + value = merge(module.fargate_profile, local.addon_fargate_profiles) } output "fargate_profile_role_arns" { description = "Fargate Profile Role ARNs" - value = values(module.fargate_profile)[*].eks_fargate_profile_role_arn + value = distinct(compact(concat(values(module.fargate_profile)[*].eks_fargate_profile_role_arn, + [one(module.fargate_pod_execution_role[*].eks_fargate_pod_execution_role_arn)] + ))) + } output "fargate_profile_role_names" { description = "Fargate Profile Role names" - value = values(module.fargate_profile)[*].eks_fargate_profile_role_name + value = distinct(compact(concat(values(module.fargate_profile)[*].eks_fargate_profile_role_name, + [one(module.fargate_pod_execution_role[*].eks_fargate_pod_execution_role_name)] + ))) +} + +output "vpc_cidr" { + description = "The CIDR of the VPC where this cluster is deployed." 
+ value = local.vpc_outputs.vpc_cidr +} + +output "availability_zones" { + description = "Availability Zones in which the cluster is provisioned" + value = local.availability_zones +} + +output "eks_addons_versions" { + description = "Map of enabled EKS Addons names and versions" + value = module.eks_cluster.eks_addons_versions } diff --git a/modules/eks/cluster/providers.tf b/modules/eks/cluster/providers.tf index 9610c5073..8ad77541f 100644 --- a/modules/eks/cluster/providers.tf +++ b/modules/eks/cluster/providers.tf @@ -2,8 +2,6 @@ provider "aws" { region = var.region assume_role { - # `terraform import` will not use data from a data source, - # so on import we have to explicitly specify the role # WARNING: # The EKS cluster is owned by the role that created it, and that # role is the only role that can access the cluster without an @@ -11,19 +9,15 @@ provider "aws" { # with the provisioned Terraform role and not an SSO role that could # be removed without notice. # - # i.e. Only NON SSO assumed roles such as spacelift assumed roles, can - # plan this terraform module. - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + # This should only be run using the target account's Terraform role. + role_arn = module.iam_roles.terraform_role_arn } } module "iam_roles" { - source = "../../account-map/modules/iam-roles" - context = module.this.context -} + source = "../../account-map/modules/iam-roles" -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" + profiles_enabled = false + + context = module.this.context } diff --git a/modules/eks/cluster/remote-state.tf b/modules/eks/cluster/remote-state.tf index bac0ec31e..0ad0cd62d 100644 --- a/modules/eks/cluster/remote-state.tf +++ b/modules/eks/cluster/remote-state.tf @@ -1,67 +1,48 @@ locals { - accounts_with_vpc = { for i, account in var.allow_ingress_from_vpc_accounts : try(account.tenant, module.this.tenant) != null ? format("%s-%s", account.tenant, account.stage) : account.stage => account } + accounts_with_vpc = local.enabled ? { + for i, account in var.allow_ingress_from_vpc_accounts : try(account.tenant, module.this.tenant) != null ? 
format("%s-%s-%s", account.tenant, account.stage, try(account.environment, module.this.environment)) : format("%s-%s", account.stage, try(account.environment, module.this.environment)) => account + } : {} } -module "vpc" { - source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" - - component = "vpc" - - context = module.this.context -} - -module "vpc_ingress" { - source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" +module "iam_arns" { + source = "../../account-map/modules/roles-to-principals" - for_each = local.accounts_with_vpc - - component = "vpc" - environment = try(each.value.environment, module.this.environment) - stage = try(each.value.stage, module.this.environment) - tenant = try(each.value.tenant, module.this.tenant) + role_map = local.role_map context = module.this.context } -module "team_roles" { +module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" + version = "1.5.0" - component = "aws-teams" + bypass = !local.enabled + component = var.vpc_component_name - tenant = local.iam_primary_roles_tenant_name - environment = var.iam_roles_environment_name - stage = var.iam_primary_roles_stage_name + defaults = { + public_subnet_ids = [] + private_subnet_ids = [] + vpc = { + subnet_type_tag_key = "" + } + vpc_cidr = null + vpc_id = null + } context = module.this.context } -module "delegated_roles" { +module "vpc_ingress" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" + version = "1.5.0" - component = "aws-team-roles" + for_each = local.accounts_with_vpc - environment = var.iam_roles_environment_name + component = var.vpc_component_name + environment = try(each.value.environment, module.this.environment) + stage = try(each.value.stage, module.this.stage) + tenant = try(each.value.tenant, module.this.tenant) context = module.this.context } -# Yes, this is self-referential. -# It obtains the previous state of the cluster so that we can add -# to it rather than overwrite it (specifically the aws-auth configMap) -module "eks" { - source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" - - component = var.eks_component_name - - defaults = { - eks_managed_node_workers_role_arns = [] - fargate_profile_role_arns = [] - } - - context = module.this.context -} diff --git a/modules/eks/cluster/variables-deprecated.tf b/modules/eks/cluster/variables-deprecated.tf new file mode 100644 index 000000000..a6c952846 --- /dev/null +++ b/modules/eks/cluster/variables-deprecated.tf @@ -0,0 +1,58 @@ +variable "apply_config_map_aws_auth" { + type = bool + description = <<-EOT + (Obsolete) Whether to execute `kubectl apply` to apply the ConfigMap to allow worker nodes to join the EKS cluster. + This input is included to avoid breaking existing configurations that set it to `true`; + a value of `false` is no longer allowed. + This input is obsolete and will be removed in a future release. + EOT + default = true + nullable = false + validation { + condition = var.apply_config_map_aws_auth == true + error_message = <<-EOT + This component no longer supports the `aws-auth` ConfigMap and always updates the access. + This input is obsolete and will be removed in a future release. + EOT + } +} + +variable "map_additional_aws_accounts" { + type = list(string) + description = <<-EOT + (Obsolete) Additional AWS accounts to grant access to the EKS cluster. 
+ This input is included to avoid breaking existing configurations that + supplied an empty list, but the list is no longer allowed to have entries. + (It is not clear that it worked properly in earlier versions in any case.) + This component now only supports EKS access entries, which require full principal ARNs. + This input is deprecated and will be removed in a future release. + EOT + default = [] + nullable = false + validation { + condition = length(var.map_additional_aws_accounts) == 0 + error_message = <<-EOT + This component no longer supports `map_additional_aws_accounts`. + (It is not clear that it worked properly in earlier versions in any case.) + This component only supports EKS access entries, which require full principal ARNs. + This input is deprecated and will be removed in a future release. + EOT + } +} + +variable "map_additional_worker_roles" { + type = list(string) + description = <<-EOT + (Deprecated) AWS IAM Role ARNs of unmanaged Linux worker nodes to grant access to the EKS cluster. + In earlier versions, this could be used to grant access to worker nodes of any type + that were not managed by the EKS cluster. Now EKS requires that unmanaged worker nodes + be classified as Linux or Windows servers, so this input is temporarily retained + with the assumption that all worker nodes are Linux servers. (It is likely that + earlier versions did not work properly with Windows worker nodes anyway.) + This input is deprecated and will be removed in a future release. + In the future, this component will either have a way to separate Linux and Windows worker nodes, + or drop support for unmanaged worker nodes entirely. + EOT + default = [] + nullable = false +} diff --git a/modules/eks/cluster/variables.tf b/modules/eks/cluster/variables.tf index 161e01601..4d1e1d884 100644 --- a/modules/eks/cluster/variables.tf +++ b/modules/eks/cluster/variables.tf @@ -7,7 +7,21 @@ variable "availability_zones" { type = list(string) description = <<-EOT AWS Availability Zones in which to deploy multi-AZ resources. - If not provided, resources will be provisioned in every private subnet in the VPC. + Ignored if `availability_zone_ids` is set. + Can be the full name, e.g. `us-east-1a`, or just the part after the region, e.g. `a`, to allow reusable values across regions. + If not provided, resources will be provisioned in every zone with a private subnet in the VPC. + EOT + default = [] + nullable = false +} + +variable "availability_zone_ids" { + type = list(string) + description = <<-EOT + List of Availability Zone IDs where subnets will be created. Overrides `availability_zones`. + Can be the full name, e.g. `use1-az1`, or just the part after the AZ ID region code, e.g. `-az1`, + to allow reusable values across regions. Consider contention for resources and spot pricing in each AZ when selecting. + Useful in some regions when using only some AZs and you want to use the same ones across multiple accounts. EOT default = [] } @@ -16,6 +30,8 @@ variable "availability_zone_abbreviation_type" { type = string description = "Type of Availability Zone abbreviation (either `fixed` or `short`) to use in names. See https://github.com/cloudposse/terraform-aws-utils for details." default = "fixed" + nullable = false + validation { condition = contains(["fixed", "short"], var.availability_zone_abbreviation_type) error_message = "The availability_zone_abbreviation_type must be either \"fixed\" or \"short\"."
@@ -26,320 +42,393 @@ variable "managed_node_groups_enabled" { type = bool description = "Set false to prevent the creation of EKS managed node groups." default = true + nullable = false } variable "oidc_provider_enabled" { type = bool description = "Create an IAM OIDC identity provider for the cluster, then you can create IAM roles to associate with a service account in the cluster, instead of using kiam or kube2iam. For more information, see https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html" + default = true + nullable = false } variable "cluster_endpoint_private_access" { type = bool - default = false description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled. Default to AWS EKS resource and it is `false`" + default = false + nullable = false } variable "cluster_endpoint_public_access" { type = bool - default = true description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled. Default to AWS EKS resource and it is `true`" + default = true + nullable = false } variable "cluster_kubernetes_version" { type = string - default = null description = "Desired Kubernetes master version. If you do not specify a value, the latest available version is used" + default = null } variable "public_access_cidrs" { type = list(string) - default = ["0.0.0.0/0"] description = "Indicates which CIDR blocks can access the Amazon EKS public API server endpoint when enabled. EKS defaults this to a list with 0.0.0.0/0." + default = ["0.0.0.0/0"] + nullable = false } variable "enabled_cluster_log_types" { type = list(string) - default = [] description = "A list of the desired control plane logging to enable. For more information, see https://docs.aws.amazon.com/en_us/eks/latest/userguide/control-plane-logs.html. Possible values [`api`, `audit`, `authenticator`, `controllerManager`, `scheduler`]" + default = [] + nullable = false } variable "cluster_log_retention_period" { type = number - default = 0 description = "Number of days to retain cluster logs. Requires `enabled_cluster_log_types` to be set. See https://docs.aws.amazon.com/en_us/eks/latest/userguide/control-plane-logs.html." + default = 0 + nullable = false } -variable "apply_config_map_aws_auth" { - type = bool - default = true - description = "Whether to execute `kubectl apply` to apply the ConfigMap to allow worker nodes to join the EKS cluster" -} - -variable "map_additional_aws_accounts" { - description = "Additional AWS account numbers to add to `aws-auth` ConfigMap" - type = list(string) - default = [] -} - -variable "map_additional_worker_roles" { - description = "AWS IAM Role ARNs of worker nodes to add to `aws-auth` ConfigMap" - type = list(string) - default = [] -} - -variable "primary_iam_roles" { - description = "Primary IAM roles to add to `aws-auth` ConfigMap" - +# TODO: +# - Support EKS Access Policies +# - Support namespaced access limits +# - Support roles from other accounts +# - Either combine with Permission Sets or similarly enhance Permission Set support +variable "aws_team_roles_rbac" { type = list(object({ - role = string - groups = list(string) + aws_team_role = string + groups = list(string) })) - default = [] + description = "List of `aws-team-roles` (in the target AWS account) to map to Kubernetes RBAC groups." 
+ default = [] + nullable = false } -variable "delegated_iam_roles" { - description = "Delegated IAM roles to add to `aws-auth` ConfigMap" - +variable "aws_sso_permission_sets_rbac" { type = list(object({ - role = string - groups = list(string) + aws_sso_permission_set = string + groups = list(string) })) - default = [] + description = <<-EOT + (Not Recommended): AWS SSO (IAM Identity Center) permission sets in the EKS deployment account to add to `aws-auth` ConfigMap. + Unfortunately, `aws-auth` ConfigMap does not support SSO permission sets, so we map the generated + IAM Role ARN corresponding to the permission set at the time Terraform runs. This is subject to change + when any changes are made to the AWS SSO configuration, invalidating the mapping, and requiring a + `terraform apply` in this project to update the `aws-auth` ConfigMap and restore access. + EOT + + default = [] + nullable = false } +# TODO: +# - Support EKS Access Policies +# - Support namespaced access limits +# - Combine with `map_additional_iam_users` into new input variable "map_additional_iam_roles" { - description = "Additional IAM roles to add to `config-map-aws-auth` ConfigMap" - type = list(object({ rolearn = string - username = string + username = optional(string) groups = list(string) })) - default = [] + description = <<-EOT + Additional IAM roles to grant access to the cluster. + *WARNING*: Full Role ARN, including path, is required for `rolearn`. + In earlier versions (with `aws-auth` ConfigMap), only the path + had to be removed from the Role ARN. The path is now required. + `username` is now ignored. This input is planned to be replaced + in a future release with a more flexible input structure that consolidates + `map_additional_iam_roles` and `map_additional_iam_users`. + EOT + default = [] + nullable = false } variable "map_additional_iam_users" { - description = "Additional IAM users to add to `aws-auth` ConfigMap" - type = list(object({ userarn = string - username = string + username = optional(string) groups = list(string) })) - default = [] + description = <<-EOT + Additional IAM users to grant access to the cluster. + `username` is now ignored. This input is planned to be replaced + in a future release with a more flexible input structure that consolidates + `map_additional_iam_roles` and `map_additional_iam_users`. + EOT + default = [] + nullable = false } variable "allowed_security_groups" { type = list(string) - default = [] description = "List of Security Group IDs to be allowed to connect to the EKS cluster" + default = [] + nullable = false } variable "allowed_cidr_blocks" { type = list(string) - default = [] description = "List of CIDR blocks to be allowed to connect to the EKS cluster" + default = [] + nullable = false } variable "subnet_type_tag_key" { type = string - default = null description = "The tag used to find the private subnets to find by availability zone. If null, will be looked up in vpc outputs." + default = null } variable "color" { type = string - default = "" description = "The cluster stage represented by a color; e.g. blue, green" + default = "" + nullable = false } variable "node_groups" { # will create 1 node group for each item in map type = map(object({ # EKS AMI version to use, e.g. "1.16.13-20200821" (no "v"). - ami_release_version = string + ami_release_version = optional(string, null) # Type of Amazon Machine Image (AMI) associated with the EKS Node Group - ami_type = string + ami_type = optional(string, null) # Additional attributes (e.g.
`1`) for the node group - attributes = list(string) + attributes = optional(list(string), null) # will create 1 auto scaling group in each specified availability zone - availability_zones = list(string) + # or all AZs with subnets if none are specified anywhere + availability_zones = optional(list(string), null) # Whether to enable Node Group to scale its AutoScaling Group - cluster_autoscaler_enabled = bool + cluster_autoscaler_enabled = optional(bool, null) # True to create new node_groups before deleting old ones, avoiding a temporary outage - create_before_destroy = bool + create_before_destroy = optional(bool, null) # Desired number of worker nodes when initially provisioned - desired_group_size = number - # Enable disk encryption for the created launch template (if we aren't provided with an existing launch template) - disk_encryption_enabled = bool - # Disk size in GiB for worker nodes. Terraform will only perform drift detection if a configuration value is provided. - disk_size = number + desired_group_size = optional(number, null) # Set of instance types associated with the EKS Node Group. Terraform will only perform drift detection if a configuration value is provided. - instance_types = list(string) + instance_types = optional(list(string), null) # Key-value mapping of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed - kubernetes_labels = map(string) + kubernetes_labels = optional(map(string), null) # List of objects describing Kubernetes taints. - kubernetes_taints = list(object({ + kubernetes_taints = optional(list(object({ key = string value = string effect = string - })) + })), null) + node_userdata = optional(object({ + before_cluster_joining_userdata = optional(string) + bootstrap_extra_args = optional(string) + kubelet_extra_args = optional(string) + after_cluster_joining_userdata = optional(string) + }), {}) # Desired Kubernetes master version. If you do not specify a value, the latest available version is used - kubernetes_version = string + kubernetes_version = optional(string, null) # The maximum size of the AutoScaling Group - max_group_size = number + max_group_size = optional(number, null) # The minimum size of the AutoScaling Group - min_group_size = number + min_group_size = optional(number, null) # List of auto-launched resource types to tag - resources_to_tag = list(string) - tags = map(string) + resources_to_tag = optional(list(string), null) + tags = optional(map(string), null) + + # block_device_map copied from cloudposse/terraform-aws-eks-node-group + # Keep in sync via copy and paste, but make optional. + # Most of the time you want "/dev/xvda". For BottleRocket, use "/dev/xvdb". + block_device_map = optional(map(object({ + no_device = optional(bool, null) + virtual_name = optional(string, null) + ebs = optional(object({ + delete_on_termination = optional(bool, true) + encrypted = optional(bool, true) + iops = optional(number, null) + kms_key_id = optional(string, null) + snapshot_id = optional(string, null) + throughput = optional(number, null) # for gp3, MiB/s, up to 1000 + volume_size = optional(number, 20) # Disk size in GB + volume_type = optional(string, "gp3") + + # Catch common camel case typos. These have no effect, they just generate better errors. + # It would be nice to actually use these, but volumeSize in particular is a number here + # and in most places it is a string with a unit suffix (e.g. 
20Gi) + # Without these defined, they would be silently ignored and the default values would be used instead, + # which is difficult to debug. + deleteOnTermination = optional(any, null) + kmsKeyId = optional(any, null) + snapshotId = optional(any, null) + volumeSize = optional(any, null) + volumeType = optional(any, null) + })) + })), null) + + # DEPRECATED: + # Enable disk encryption for the created launch template (if we aren't provided with an existing launch template) + # DEPRECATED: disk_encryption_enabled is DEPRECATED, use `block_device_map` instead. + disk_encryption_enabled = optional(bool, null) + # Disk size in GiB for worker nodes. Terraform will only perform drift detection if a configuration value is provided. + # DEPRECATED: disk_size is DEPRECATED, use `block_device_map` instead. + disk_size = optional(number, null) + })) + description = "List of objects defining a node group for the cluster" default = {} + nullable = false } variable "node_group_defaults" { # Any value in the node group that is null will be replaced # by the value in this object, which can also be null type = object({ - ami_release_version = string - ami_type = string - attributes = list(string) - availability_zones = list(string) # set to null to use var.availability_zones - cluster_autoscaler_enabled = bool - create_before_destroy = bool - desired_group_size = number - disk_encryption_enabled = bool - disk_size = number - instance_types = list(string) - kubernetes_labels = map(string) - kubernetes_taints = list(object({ + ami_release_version = optional(string, null) + ami_type = optional(string, null) + attributes = optional(list(string), null) + availability_zones = optional(list(string)) # set to null to use var.availability_zones + cluster_autoscaler_enabled = optional(bool, null) + create_before_destroy = optional(bool, null) + desired_group_size = optional(number, null) + instance_types = optional(list(string), null) + kubernetes_labels = optional(map(string), {}) + kubernetes_taints = optional(list(object({ key = string value = string effect = string - })) - kubernetes_version = string # set to null to use cluster_kubernetes_version - max_group_size = number - min_group_size = number - resources_to_tag = list(string) - tags = map(string) + })), []) + node_userdata = optional(object({ + before_cluster_joining_userdata = optional(string) + bootstrap_extra_args = optional(string) + kubelet_extra_args = optional(string) + after_cluster_joining_userdata = optional(string) + }), {}) + kubernetes_version = optional(string, null) # set to null to use cluster_kubernetes_version + max_group_size = optional(number, null) + min_group_size = optional(number, null) + resources_to_tag = optional(list(string), null) + tags = optional(map(string), null) + + # block_device_map copied from cloudposse/terraform-aws-eks-node-group + # Keep in sync via copy and paste, but make optional + # Most of the time you want "/dev/xvda". For BottleRocket, use "/dev/xvdb". + block_device_map = optional(map(object({ + no_device = optional(bool, null) + virtual_name = optional(string, null) + ebs = optional(object({ + delete_on_termination = optional(bool, true) + encrypted = optional(bool, true) + iops = optional(number, null) + kms_key_id = optional(string, null) + snapshot_id = optional(string, null) + throughput = optional(number, null) # for gp3, MiB/s, up to 1000 + volume_size = optional(number, 50) # disk size in GB + volume_type = optional(string, "gp3") + + # Catch common camel case typos. 
These have no effect, they just generate better errors. + # It would be nice to actually use these, but volumeSize in particular is a number here + # and in most places it is a string with a unit suffix (e.g. 20Gi) + # Without these defined, they would be silently ignored and the default values would be used instead, + # which is difficult to debug. + deleteOnTermination = optional(any, null) + kmsKeyId = optional(any, null) + snapshotId = optional(any, null) + volumeSize = optional(any, null) + volumeType = optional(any, null) + })) + })), null) + + # DEPRECATED: disk_encryption_enabled is DEPRECATED, use `block_device_map` instead. + disk_encryption_enabled = optional(bool, null) + # DEPRECATED: disk_size is DEPRECATED, use `block_device_map` instead. + disk_size = optional(number, null) }) + description = "Defaults for node groups in the cluster" + default = { - ami_release_version = null - ami_type = null - attributes = null - availability_zones = null - cluster_autoscaler_enabled = true - create_before_destroy = true - desired_group_size = 1 - disk_encryption_enabled = true - disk_size = 20 - instance_types = ["t3.medium"] - kubernetes_labels = null - kubernetes_taints = null - kubernetes_version = null # set to null to use cluster_kubernetes_version - max_group_size = 100 - min_group_size = null - resources_to_tag = null - tags = null + desired_group_size = 1 + # t3.medium is kept as the default for backward compatibility. + # Recommendation as of 2023-08-08 is c6a.large to provide reserve HA capacity regardless of Karpenter behavior. + instance_types = ["t3.medium"] + kubernetes_version = null # set to null to use cluster_kubernetes_version + max_group_size = 100 + + block_device_map = { + "/dev/xvda" = { + ebs = { + encrypted = true + volume_size = 20 # GB + volume_type = "gp2" # Should be gp3, but left as gp2 for backwards compatibility + } + } + } } -} - -variable "iam_roles_environment_name" { - type = string - description = "The name of the environment where the IAM roles are provisioned" - default = "gbl" -} - -variable "iam_primary_roles_stage_name" { - type = string - description = "The name of the stage where the IAM primary roles are provisioned" - default = "identity" -} - -variable "iam_primary_roles_tenant_name" { - type = string - description = "The name of the tenant where the IAM primary roles are provisioned" - default = null + nullable = false } variable "cluster_encryption_config_enabled" { type = bool - default = true description = "Set to `true` to enable Cluster Encryption Configuration" + default = true + nullable = false } variable "cluster_encryption_config_kms_key_id" { type = string - default = "" description = "KMS Key ID to use for cluster encryption config" + default = "" + nullable = false } variable "cluster_encryption_config_kms_key_enable_key_rotation" { type = bool - default = true description = "Cluster Encryption Config KMS Key Resource argument - enable kms key rotation" + default = true + nullable = false } variable "cluster_encryption_config_kms_key_deletion_window_in_days" { type = number - default = 10 description = "Cluster Encryption Config KMS Key Resource argument - key deletion windows in days post destruction" + default = 10 + nullable = false } variable "cluster_encryption_config_kms_key_policy" { type = string - default = null description = "Cluster Encryption Config KMS Key Resource argument - key policy" + default = null } variable "cluster_encryption_config_resources" { - type = list(any) + type = list(string) + description = 
"Cluster Encryption Config Resources to encrypt, e.g. `[\"secrets\"]`" default = ["secrets"] - description = "Cluster Encryption Config Resources to encrypt, e.g. ['secrets']" + nullable = false } variable "aws_ssm_agent_enabled" { type = bool description = "Set true to attach the required IAM policy for AWS SSM agent to each EC2 instance's IAM Role" default = false -} - -variable "kubeconfig_file" { - type = string - default = "" - description = "Name of `kubeconfig` file to use to configure Kubernetes provider" -} - -variable "kubeconfig_file_enabled" { - type = bool - default = false - description = <<-EOF - Set true to configure Kubernetes provider with a `kubeconfig` file specified by `kubeconfig_file`. - Mainly for when the standard configuration produces a Terraform error. - EOF -} - -variable "aws_auth_yaml_strip_quotes" { - type = bool - default = true - description = "If true, remove double quotes from the generated aws-auth ConfigMap YAML to reduce spurious diffs in plans" + nullable = false } variable "cluster_private_subnets_only" { type = bool - default = false description = "Whether or not to enable private subnets or both public and private subnets" + default = false + nullable = false } variable "allow_ingress_from_vpc_accounts" { - type = any - default = [] + type = any + description = <<-EOF List of account contexts to pull VPC ingress CIDR and add to cluster security group. @@ -351,18 +440,23 @@ variable "allow_ingress_from_vpc_accounts" { tenant = "core" } EOF + + default = [] + nullable = false } -variable "eks_component_name" { +variable "vpc_component_name" { type = string - description = "The name of the eks component" - default = "eks/cluster" + description = "The name of the vpc component" + default = "vpc" + nullable = false } variable "karpenter_iam_role_enabled" { type = bool description = "Flag to enable/disable creation of IAM role for EC2 Instance Profile that is attached to the nodes launched by Karpenter" default = false + nullable = false } variable "fargate_profiles" { @@ -370,14 +464,17 @@ variable "fargate_profiles" { kubernetes_namespace = string kubernetes_labels = map(string) })) + description = "Fargate Profiles config" default = {} + nullable = false } variable "fargate_profile_iam_role_kubernetes_namespace_delimiter" { type = string description = "Delimiter for the Kubernetes namespace in the IAM Role name for Fargate Profiles" default = "-" + nullable = false } variable "fargate_profile_iam_role_permissions_boundary" { @@ -385,3 +482,87 @@ variable "fargate_profile_iam_role_permissions_boundary" { description = "If provided, all Fargate Profiles IAM roles will be created with this permissions boundary attached" default = null } + +variable "addons" { + type = map(object({ + enabled = optional(bool, true) + addon_version = optional(string, null) + # configuration_values is a JSON string, such as '{"computeType": "Fargate"}'. + configuration_values = optional(string, null) + # Set default resolve_conflicts to OVERWRITE because it is required on initial installation of + # add-ons that have self-managed versions installed by default (e.g. vpc-cni, coredns), and + # because any custom configuration that you would want to preserve should be managed by Terraform. 
+ resolve_conflicts_on_create = optional(string, "OVERWRITE") + resolve_conflicts_on_update = optional(string, "OVERWRITE") + service_account_role_arn = optional(string, null) + create_timeout = optional(string, null) + update_timeout = optional(string, null) + delete_timeout = optional(string, null) + })) + + description = "Manages [EKS addons](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_addon) resources" + default = {} + nullable = false +} + +variable "deploy_addons_to_fargate" { + type = bool + description = "Set to `true` (not recommended) to deploy addons to Fargate instead of initial node pool" + default = false + nullable = false +} + +variable "addons_depends_on" { + type = bool + + description = <<-EOT + If set `true` (recommended), all addons will depend on managed node groups provisioned by this component and therefore not be installed until nodes are provisioned. + See [issue #170](https://github.com/cloudposse/terraform-aws-eks-cluster/issues/170) for more details. + EOT + + default = true + nullable = false +} + +variable "legacy_fargate_1_role_per_profile_enabled" { + type = bool + description = <<-EOT + Set to `false` for new clusters to create a single Fargate Pod Execution role for the cluster. + Set to `true` for existing clusters to preserve the old behavior of creating + a Fargate Pod Execution role for each Fargate Profile. + EOT + default = true + nullable = false +} + +variable "legacy_do_not_create_karpenter_instance_profile" { + type = bool + description = <<-EOT + **Obsolete:** The issues this was meant to mitigate were fixed in AWS Terraform Provider v5.43.0 + and Karpenter v0.33.0. This variable will be removed in a future release. + Remove this input from your configuration and leave it at default. + **Old description:** When `true` (the default), suppresses creation of the IAM Instance Profile + for nodes launched by Karpenter, to preserve the legacy behavior of + the `eks/karpenter` component creating it. + Set to `false` to enable creation of the IAM Instance Profile, which + ensures that both the role and the instance profile have the same lifecycle, + and avoids AWS Provider issue [#32671](https://github.com/hashicorp/terraform-provider-aws/issues/32671). + Use in conjunction with `eks/karpenter` component `legacy_create_karpenter_instance_profile`. + EOT + default = true +} + +variable "access_config" { + type = object({ + authentication_mode = optional(string, "API") + bootstrap_cluster_creator_admin_permissions = optional(bool, false) + }) + description = "Access configuration for the EKS cluster" + default = {} + nullable = false + + validation { + condition = !contains(["CONFIG_MAP"], var.access_config.authentication_mode) + error_message = "The CONFIG_MAP authentication_mode is not supported." + } +} diff --git a/modules/eks/cluster/versions.tf b/modules/eks/cluster/versions.tf index cc73ffd35..601150b50 100644 --- a/modules/eks/cluster/versions.tf +++ b/modules/eks/cluster/versions.tf @@ -1,10 +1,23 @@ terraform { - required_version = ">= 1.0.0" + required_version = ">= 1.3.0" required_providers { aws = { source = "hashicorp/aws" version = ">= 4.9.0" } + random = { + source = "hashicorp/random" + version = ">= 3.0" + } + # We no longer use the Kubernetes provider, so we can remove it, + # but since there are bugs in the current version, we keep this as a comment. 
+ # kubernetes = { + # source = "hashicorp/kubernetes" + # # Version 2.25 and higher have bugs, so we cannot allow them, + # # but automation enforces that we have no upper limit. + # # It is less critical here, because the Kubernetes provider is being removed entirely. + # version = "2.24" + # } } } diff --git a/modules/eks/datadog-agent/CHANGELOG.md b/modules/eks/datadog-agent/CHANGELOG.md new file mode 100644 index 000000000..06748cc00 --- /dev/null +++ b/modules/eks/datadog-agent/CHANGELOG.md @@ -0,0 +1,66 @@ +## PR [#814](https://github.com/cloudposse/terraform-aws-components/pull/814) + +### Possible Breaking Change + +Removed inputs `iam_role_enabled` and `iam_policy_statements` because the Datadog agent does not need an IAM (IRSA) role +or any special AWS permissions because it works solely within the Kubernetes environment. (Datadog has AWS integrations +to handle monitoring that requires AWS permissions.) + +This only a breaking change if you were setting these inputs. If you were, simply remove them from your configuration. + +### Possible Breaking Change + +Previously this component directly created the Kubernetes namespace for the agent when `create_namespace` was set to +`true`. Now this component delegates that responsibility to the `helm-release` module, which better coordinates the +destruction of resources at destruction time (for example, ensuring that the Helm release is completely destroyed and +finalizers run before deleting the namespace). + +Generally the simplest upgrade path is to destroy the Helm release, then destroy the namespace, then apply the new +configuration. Alternatively, you can use `terraform state mv` to move the existing namespace to the new Terraform +"address", which will preserve the existing deployment and reduce the possibility of the destroy failing and leaving the +Kubernetes cluster in a bad state. + +### Cluster Agent Redundancy + +In this PR we have defaulted the number of Cluster Agents to 2. This is because when there are no Cluster Agents, all +cluster metrics are lost. Having 2 agents makes it possible to keep 1 agent running at all times, even when the other is +on a node being drained. + +### DNS Resolution Enhancement + +If Datadog processes are looking for where to send data and are configured to look up +`datadog.monitoring.svc.cluster.local`, by default the cluster will make a DNS query for each of the following: + +1. `datadog.monitoring.svc.cluster.local.monitoring.svc.cluster.local` +2. `datadog.monitoring.svc.cluster.local.svc.cluster.local` +3. `datadog.monitoring.svc.cluster.local.cluster.local` +4. `datadog.monitoring.svc.cluster.local.ec2.internal` +5. `datadog.monitoring.svc.cluster.local` + +due to the DNS resolver's +[search path](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#namespaces-of-services). Because +this lookup happens so frequently (several times a second in a production environment), it can cause a lot of +unnecessary work, even if the DNS query is cached. + +In this PR we have set `ndots: 2` in the agent and cluster agent configuration so that only the 5th query is made. (In +Kubernetes, the default value for `ndots` is 5. DNS queries having fewer than `ndots` dots in them will be attempted +using each component of the search path in turn until a match is found, while those with more dots, or with a final dot, +are looked up as is.) + +Alternately, where you are setting the host name to be resolved, you can add a final dot at the end so that the search +path is not used, e.g. 
`datadog.monitoring.svc.cluster.local.` + +### Note for Bottlerocket users + +If you are using Bottlerocket, you will want to uncomment the following from `values.yaml` or add it to your `values` +input: + +```yaml +criSocketPath: /run/dockershim.sock # Bottlerocket Only +env: # Bottlerocket Only + - name: DD_AUTOCONFIG_INCLUDE_FEATURES # Bottlerocket Only + value: "containerd" # Bottlerocket Only +``` + +See the [Datadog documentation](https://docs.datadoghq.com/containers/kubernetes/distributions/?tab=helm#EKS) for +details. diff --git a/modules/datadog-agent/README.md b/modules/eks/datadog-agent/README.md similarity index 76% rename from modules/datadog-agent/README.md rename to modules/eks/datadog-agent/README.md index 24c18d9af..23a4d2419 100644 --- a/modules/datadog-agent/README.md +++ b/modules/eks/datadog-agent/README.md @@ -1,15 +1,15 @@ -# Component: `datadog-agent` +--- +tags: + - component/eks/datadog-agent + - layer/datadog + - provider/aws + - provider/helm + - provider/datadog +--- -This component installs the `datadog-agent` for EKS clusters. - -Note that pending https://tanzle.atlassian.net/browse/SRE-268 & https://cloudposse.atlassian.net/browse/MEROPE-381 , failed Terraform applies for this component may leave state & live release config inconsistent resulting in out-of-sync configuration but a no-change plan. +# Component: `eks/datadog-agent` -If you're getting a "No changes" plan when you know the live release config doesn't match the new values, force a taint/recreate of the Helm release with a Spacelift task for the stack like this: `terraform apply -replace='module.datadog_agent.helm_release.this[0]' -auto-approve`. - -Locally this looks like -```shell -atmos terraform deploy datadog-agent -s ${region}-${stage} -replace='module.datadog_agent.helm_release.this[0]' -``` +This component installs the `datadog-agent` for EKS clusters. ## Usage @@ -26,21 +26,47 @@ components: workspace_enabled: true vars: enabled: true + eks_component_name: eks/cluster name: "datadog" description: "Datadog Kubernetes Agent" kubernetes_namespace: "monitoring" create_namespace: true repository: "https://helm.datadoghq.com" chart: "datadog" - chart_version: "3.0.0" - timeout: 600 + chart_version: "3.29.2" + timeout: 1200 wait: true atomic: true cleanup_on_fail: true + cluster_checks_enabled: false + helm_manifest_experiment_enabled: false + secrets_store_type: SSM + tags: + team: sre + service: datadog-agent + app: monitoring + # datadog-agent shouldn't be deployed to the Fargate nodes + values: + agents: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: eks.amazonaws.com/compute-type + operator: NotIn + values: + - fargate + datadog: + env: + - name: DD_EC2_PREFER_IMDSV2 # this merges ec2 instances and the node in the hostmap section + value: "true" ``` Deploy this to a particular environment such as dev, prod, etc. +This will add cluster checks to a specific environment. + ```yaml components: terraform: @@ -51,33 +77,69 @@ components: - catalog/cluster-checks/defaults/*.yaml - catalog/cluster-checks/dev/*.yaml datadog_cluster_check_config_parameters: {} + # add additional tags to all data coming in from this agent. datadog_tags: - "env:dev" - "region:us-west-2" - "stage:dev" ``` -# Cluster Checks +## Cluster Checks -Cluster Checks are configurations that allow us to setup external URLs to be monitored. They can be configured through the datadog agent or annotations on kubernetes services. 
+Cluster Checks are configurations that allow us to setup external URLs to be monitored. They can be configured through +the datadog agent or annotations on kubernetes services. -Cluster Checks are similar to synthetics checks, they are not as indepth, but significantly cheaper. Use Cluster Checks when you need a simple health check beyond the kubernetes pod health check. +Cluster Checks are similar to synthetics checks, they are not as indepth, but significantly cheaper. Use Cluster Checks +when you need a simple health check beyond the kubernetes pod health check. -Public addresses that test endpoints must use the agent configuration, whereas service addresses internal to the cluster can be tested by annotations. +Public addresses that test endpoints must use the agent configuration, whereas service addresses internal to the cluster +can be tested by annotations. -## Adding Cluster Checks +### Adding Cluster Checks Cluster Checks can be enabled or disabled via the `cluster_checks_enabled` variable. We recommend this be set to true. -New Cluster Checks can be added to defaults to be applied in every account. Alternatively they can be placed in an individual stage folder which will be applied to individual stages. This is controlled by the `datadog_cluster_check_config_parameters` variable, which determines the paths of yaml files to look for cluster checks per stage. +New Cluster Checks can be added to defaults to be applied in every account. Alternatively they can be placed in an +individual stage folder which will be applied to individual stages. This is controlled by the +`datadog_cluster_check_config_parameters` variable, which determines the paths of yaml files to look for cluster checks +per stage. + +Once they are added, and properly configured, the new checks show up in the network monitor creation under `ssl` and +`Http` -Once they are added, and properly configured, the new checks show up in the network monitor creation under `ssl` and `Http` +**Please note:** the yaml file name doesn't matter, but the root key inside which is `something.yaml` does matter. this +is following +[datadogs docs](https://docs.datadoghq.com/agent/cluster_agent/clusterchecks/?tab=helm#configuration-from-static-configuration-files) +for `.yaml`. -**Please note:** the yaml file name doesn't matter, but the root key inside which is `something.yaml` does matter. this is following [datadogs docs](https://docs.datadoghq.com/agent/cluster_agent/clusterchecks/?tab=helm#configuration-from-static-configuration-files) for .yaml. +#### Sample Yaml -## Monitoring Cluster Checks +> [!WARNING] +> +> The key of a filename must match datadog docs, which is `.yaml` > +> [Datadog Cluster Checks](https://docs.datadoghq.com/agent/cluster_agent/clusterchecks/?tab=helm#configuration-from-static-configuration-files) -Using Cloudposse's `datadog-monitor` component. The following yaml snippet will monitor all HTTP Cluster Checks, this can be added to each stage (usually via a defaults folder). +Cluster Checks **can** be used for external URL testing (loadbalancer endpoints), whereas annotations **must** be used +for kubernetes services. + +``` +http_check.yaml: + cluster_check: true + init_config: + instances: + - name: "[${stage}] Echo Server" + url: "https://echo.${stage}.uw2.acme.com" + - name: "[${stage}] Portal" + url: "https://portal.${stage}.uw2.acme.com" + - name: "[${stage}] ArgoCD" + url: "https://argocd.${stage}.uw2.acme.com" + +``` + +### Monitoring Cluster Checks + +Using Cloudposse's `datadog-monitor` component. 
The following yaml snippet will monitor all HTTP Cluster Checks, this +can be added to each stage (usually via a defaults folder). ```yaml https-checks: @@ -89,7 +151,7 @@ https-checks: HTTPS Check failed on {{instance.name}} in Stage: {{stage.name}} escalation_message: "" - tags: + tags: managed-by: Terraform notify_no_data: false notify_audit: false @@ -104,7 +166,7 @@ https-checks: new_host_delay: 0 new_group_delay: 0 no_data_timeframe: 2 - threshold_windows: { } + threshold_windows: {} thresholds: critical: 1 warning: 1 @@ -113,53 +175,47 @@ https-checks: ## References -* https://github.com/DataDog/helm-charts/tree/main/charts/datadog -* https://github.com/DataDog/helm-charts/blob/main/charts/datadog/values.yaml -* https://github.com/DataDog/helm-charts/blob/main/examples/datadog/agent_basic_values.yaml -* https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release -* https://docs.datadoghq.com/agent/cluster_agent/clusterchecks/?tab=helm +- https://github.com/DataDog/helm-charts/tree/main/charts/datadog +- https://github.com/DataDog/helm-charts/blob/main/charts/datadog/values.yaml +- https://github.com/DataDog/helm-charts/blob/main/examples/datadog/agent_basic_values.yaml +- https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release +- https://docs.datadoghq.com/agent/cluster_agent/clusterchecks/?tab=helm + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | -| [helm](#requirement\_helm) | >= 2.3.0 | -| [utils](#requirement\_utils) | >= 0.3.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | +| [helm](#requirement\_helm) | >= 2.7 | +| [kubernetes](#requirement\_kubernetes) | >= 2.14.0, != 2.21.0 | +| [utils](#requirement\_utils) | >= 1.10.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | -| [kubernetes](#provider\_kubernetes) | n/a | +| [aws](#provider\_aws) | >= 4.9.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [datadog\_agent](#module\_datadog\_agent) | cloudposse/helm-release/aws | 0.6.0 | -| [datadog\_cluster\_check\_yaml\_config](#module\_datadog\_cluster\_check\_yaml\_config) | cloudposse/config/yaml | 1.0.1 | -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | -| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [datadog\_agent](#module\_datadog\_agent) | cloudposse/helm-release/aws | 0.10.0 | +| [datadog\_cluster\_check\_yaml\_config](#module\_datadog\_cluster\_check\_yaml\_config) | cloudposse/config/yaml | 1.0.2 | +| [datadog\_configuration](#module\_datadog\_configuration) | ../../datadog-configuration/modules/datadog_keys | n/a | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [values\_merge](#module\_values\_merge) | cloudposse/config/yaml//modules/deepmerge | 1.0.2 | ## Resources | Name | Type | |------|------| -| [kubernetes_namespace.default](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | -| [aws_eks_cluster.kubernetes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | | [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | 
-| [aws_eks_cluster_auth.kubernetes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_secretsmanager_secret.datadog_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret) | data source | -| [aws_secretsmanager_secret.datadog_app_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret) | data source | -| [aws_secretsmanager_secret_version.datadog_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret_version) | data source | -| [aws_secretsmanager_secret_version.datadog_app_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret_version) | data source | -| [aws_ssm_parameter.datadog_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | -| [aws_ssm_parameter.datadog_app_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | ## Inputs @@ -174,8 +230,6 @@ https-checks: | [cluster\_checks\_enabled](#input\_cluster\_checks\_enabled) | Enable Cluster Checks for the Datadog Agent | `bool` | `false` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [create\_namespace](#input\_create\_namespace) | Create the Kubernetes namespace if it does not yet exist | `bool` | `true` | no | -| [datadog\_api\_secret\_key](#input\_datadog\_api\_secret\_key) | The key of the Datadog API secret | `string` | `"datadog/datadog_api_key"` | no | -| [datadog\_app\_secret\_key](#input\_datadog\_app\_secret\_key) | The key of the Datadog Application secret | `string` | `"datadog/datadog_app_key"` | no | | [datadog\_cluster\_check\_auto\_added\_tags](#input\_datadog\_cluster\_check\_auto\_added\_tags) | List of tags to add to Datadog Cluster Check | `list(string)` |
[
"stage",
"environment"
]
| no | | [datadog\_cluster\_check\_config\_parameters](#input\_datadog\_cluster\_check\_config\_parameters) | Map of parameters to Datadog Cluster Check configurations | `map(any)` | `{}` | no | | [datadog\_cluster\_check\_config\_paths](#input\_datadog\_cluster\_check\_config\_paths) | List of paths to Datadog Cluster Check configurations | `list(string)` | `[]` | no | @@ -186,17 +240,16 @@ https-checks: | [eks\_component\_name](#input\_eks\_component\_name) | The name of the EKS component. Used to get the remote state | `string` | `"eks/eks"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `true` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | | [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | | [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | | [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | -| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | | [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | | [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | | [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | @@ -210,11 +263,11 @@ https-checks: | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | | [repository](#input\_repository) | Repository URL where to locate the requested chart | `string` | `null` | no | -| [secrets\_store\_type](#input\_secrets\_store\_type) | Secret store type for Datadog API and app keys. Valid values: `SSM`, `ASM` | `string` | `"SSM"` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | | [timeout](#input\_timeout) | Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds | `number` | `null` | no | +| [values](#input\_values) | Additional values to yamlencode as `helm_release` values. | `any` | `{}` | no | | [verify](#input\_verify) | Verify the package before installing it. Helm uses a provenance file to verify the integrity of the chart; this must be hosted alongside the chart | `bool` | `false` | no | | [wait](#input\_wait) | Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true` | `bool` | `null` | no | @@ -222,10 +275,13 @@ https-checks: | Name | Description | |------|-------------| -| [cluster\_checks](#output\_cluster\_checks) | n/a | +| [cluster\_checks](#output\_cluster\_checks) | Cluster Checks for the cluster | | [metadata](#output\_metadata) | Block status of the deployed release | + ## References -* Datadog's [Kubernetes Agent documentation](https://docs.datadoghq.com/containers/kubernetes/) -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/datadog-agent) - Cloud Posse's upstream component + +- Datadog's [Kubernetes Agent documentation](https://docs.datadoghq.com/containers/kubernetes/) +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/datadog-agent) - + Cloud Posse's upstream component diff --git a/modules/datadog-agent/catalog/cluster-checks/defaults/http_checks.yaml b/modules/eks/datadog-agent/catalog/cluster-checks/defaults/http_checks.yaml similarity index 99% rename from modules/datadog-agent/catalog/cluster-checks/defaults/http_checks.yaml rename to modules/eks/datadog-agent/catalog/cluster-checks/defaults/http_checks.yaml index 0f33229b3..cdde56c92 100644 --- a/modules/datadog-agent/catalog/cluster-checks/defaults/http_checks.yaml +++ b/modules/eks/datadog-agent/catalog/cluster-checks/defaults/http_checks.yaml @@ -4,4 +4,3 @@ http_check.yaml: instances: - name: "[${stage}] Echo Server" url: "https://echo.${stage}.acme.com" - diff --git a/modules/datadog-agent/catalog/cluster-checks/dev/http_checks.yaml b/modules/eks/datadog-agent/catalog/cluster-checks/dev/http_checks.yaml similarity index 100% rename from modules/datadog-agent/catalog/cluster-checks/dev/http_checks.yaml rename to modules/eks/datadog-agent/catalog/cluster-checks/dev/http_checks.yaml diff --git a/modules/eks/datadog-agent/context.tf b/modules/eks/datadog-agent/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/eks/datadog-agent/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. 
+# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. 
A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? 
true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/eks/datadog-agent/helm-variables.tf b/modules/eks/datadog-agent/helm-variables.tf new file mode 100644 index 000000000..fade04b95 --- /dev/null +++ b/modules/eks/datadog-agent/helm-variables.tf @@ -0,0 +1,63 @@ +variable "description" { + type = string + description = "Release description attribute (visible in the history)" + default = null +} + +variable "chart" { + type = string + description = "Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended" +} + +variable "repository" { + type = string + description = "Repository URL where to locate the requested chart" + default = null +} + +variable "chart_version" { + type = string + description = "Specify the exact chart version to install. If this is not specified, the latest version is installed" + default = null +} + +variable "kubernetes_namespace" { + type = string + description = "Kubernetes namespace to install the release into" +} + +variable "timeout" { + type = number + description = "Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). 
Defaults to `300` seconds" + default = null +} + +variable "cleanup_on_fail" { + type = bool + description = "Allow deletion of new resources created in this upgrade when upgrade fails" + default = true +} + +variable "atomic" { + type = bool + description = "If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used" + default = true +} + +variable "wait" { + type = bool + description = "Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`" + default = null +} + +variable "create_namespace" { + type = bool + description = "Create the Kubernetes namespace if it does not yet exist" + default = true +} + +variable "verify" { + type = bool + description = "Verify the package before installing it. Helm uses a provenance file to verify the integrity of the chart; this must be hosted alongside the chart" + default = false +} diff --git a/modules/datadog-agent/main.tf b/modules/eks/datadog-agent/main.tf similarity index 58% rename from modules/datadog-agent/main.tf rename to modules/eks/datadog-agent/main.tf index 876e0e809..a22c81898 100644 --- a/modules/datadog-agent/main.tf +++ b/modules/eks/datadog-agent/main.tf @@ -3,19 +3,15 @@ locals { tags = module.this.tags - datadog_api_key = local.enabled ? (var.secrets_store_type == "ASM" ? ( - data.aws_secretsmanager_secret_version.datadog_api_key[0].secret_string) : - data.aws_ssm_parameter.datadog_api_key[0].value - ) : null - - datadog_app_key = local.enabled ? (var.secrets_store_type == "ASM" ? ( - data.aws_secretsmanager_secret_version.datadog_app_key[0].secret_string) : - data.aws_ssm_parameter.datadog_app_key[0].value - ) : null + datadog_api_key = module.datadog_configuration.datadog_api_key + datadog_app_key = module.datadog_configuration.datadog_app_key + datadog_site = module.datadog_configuration.datadog_site # combine context tags with passed in datadog_tags # skip name since that won't be relevant for each metric - datadog_tags = toset(distinct(concat([for k, v in module.this.tags : "${lower(k)}:${v}" if lower(k) != "name"], tolist(var.datadog_tags)))) + datadog_tags = toset(distinct(concat([ + for k, v in module.this.tags : "${lower(k)}:${v}" if lower(k) != "name" + ], tolist(var.datadog_tags)))) cluster_checks_enabled = local.enabled && var.cluster_checks_enabled @@ -28,10 +24,10 @@ locals { datadog_cluster_checks = { for k, v in local.deep_map_merge : k => merge(v, { - instances : [ + instances = [ for key, val in v.instances : merge(val, { - tags : [ + tags = [ for tag, tag_value in local.context_tags : format("%s:%s", tag, tag_value) if contains(var.datadog_cluster_check_auto_added_tags, tag) @@ -40,21 +36,27 @@ locals { ] }) } - set_datadog_cluster_checks = [for cluster_check_key, cluster_check_value in local.datadog_cluster_checks : - { + set_datadog_cluster_checks = [ + for cluster_check_key, cluster_check_value in local.datadog_cluster_checks : { # Since we are using json pathing to set deep yaml values, and the key we want to set is `something.yaml` # we need to escape the key of the cluster check. name = format("clusterAgent.confd.%s", replace(cluster_check_key, ".", "\\.")) type = "auto" value = yamlencode(cluster_check_value) - }] + } + ] +} + +module "datadog_configuration" { + source = "../../datadog-configuration/modules/datadog_keys" + context = module.this.context } module "datadog_cluster_check_yaml_config" { count = local.cluster_checks_enabled ? 
1 : 0 source = "cloudposse/config/yaml" - version = "1.0.1" + version = "1.0.2" map_config_local_base_path = path.module map_config_paths = var.datadog_cluster_check_config_paths @@ -69,37 +71,43 @@ module "datadog_cluster_check_yaml_config" { context = module.this.context } -resource "kubernetes_namespace" "default" { - count = local.enabled && var.create_namespace ? 1 : 0 - - metadata { - name = var.kubernetes_namespace +module "values_merge" { + source = "cloudposse/config/yaml//modules/deepmerge" + version = "1.0.2" - labels = local.tags - } + # Merge in order: datadog values, var.values + maps = [ + yamldecode( + file("${path.module}/values.yaml") + ), + var.values, + ] } + module "datadog_agent" { source = "cloudposse/helm-release/aws" - version = "0.6.0" - - name = module.this.name - chart = var.chart - description = var.description - repository = var.repository - chart_version = var.chart_version - kubernetes_namespace = join("", kubernetes_namespace.default.*.id) - create_namespace = false - verify = var.verify - wait = var.wait - atomic = var.atomic - cleanup_on_fail = var.cleanup_on_fail - timeout = var.timeout + version = "0.10.0" + + name = module.this.name + chart = var.chart + description = var.description + repository = var.repository + chart_version = var.chart_version + + kubernetes_namespace = var.kubernetes_namespace + create_namespace_with_kubernetes = var.create_namespace + + verify = var.verify + wait = var.wait + atomic = var.atomic + cleanup_on_fail = var.cleanup_on_fail + timeout = var.timeout eks_cluster_oidc_issuer_url = module.eks.outputs.eks_cluster_identity_oidc_issuer values = [ - file("${path.module}/values.yaml") + yamlencode(module.values_merge.merged) ] set_sensitive = [ @@ -112,6 +120,11 @@ module "datadog_agent" { name = "datadog.appKey" type = "string" value = local.datadog_app_key + }, + { + name = "datadog.site" + type = "string" + value = local.datadog_site } ] @@ -125,8 +138,10 @@ module "datadog_agent" { name = "datadog.clusterName" type = "string" value = module.eks.outputs.eks_cluster_id - } + }, ], local.set_datadog_cluster_checks) - depends_on = [kubernetes_namespace.default] + iam_role_enabled = false + + context = module.this.context } diff --git a/modules/datadog-agent/outputs.tf b/modules/eks/datadog-agent/outputs.tf similarity index 65% rename from modules/datadog-agent/outputs.tf rename to modules/eks/datadog-agent/outputs.tf index 331f07a18..f63ad1975 100644 --- a/modules/datadog-agent/outputs.tf +++ b/modules/eks/datadog-agent/outputs.tf @@ -4,5 +4,6 @@ output "metadata" { } output "cluster_checks" { - value = local.datadog_cluster_checks + value = local.datadog_cluster_checks + description = "Cluster Checks for the cluster" } diff --git a/modules/eks/datadog-agent/provider-helm.tf b/modules/eks/datadog-agent/provider-helm.tf new file mode 100644 index 000000000..91cc7f6d4 --- /dev/null +++ b/modules/eks/datadog-agent/provider-helm.tf @@ -0,0 +1,201 @@ +################## +# +# This file is a drop-in to provide a helm provider. +# +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# +# All the following variables are just about configuring the Kubernetes provider +# to be able to modify EKS cluster. The reason there are so many options is +# because at various times, each one of them has had problems, so we give you a choice. 
+# +# The reason there are so many "enabled" inputs rather than automatically +# detecting whether or not they are enabled based on the value of the input +# is that any logic based on input values requires the values to be known during +# the "plan" phase of Terraform, and often they are not, which causes problems. +# +variable "kubeconfig_file_enabled" { + type = bool + default = false + description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false +} + +variable "kubeconfig_file" { + type = string + default = "" + description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false +} + +variable "kubeconfig_context" { + type = string + default = "" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false +} + +variable "kube_data_auth_enabled" { + type = bool + default = false + description = <<-EOT + If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. + EOT + nullable = false +} + +variable "kube_exec_auth_enabled" { + type = bool + default = true + description = <<-EOT + If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. + EOT + nullable = false +} + +variable "kube_exec_auth_role_arn" { + type = string + default = "" + description = "The role ARN for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_role_arn_enabled" { + type = bool + default = true + description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false +} + +variable "kube_exec_auth_aws_profile" { + type = string + default = "" + description = "The AWS config profile for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_aws_profile_enabled" { + type = bool + default = false + description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false +} + +variable "kubeconfig_exec_auth_api_version" { + type = string + default = "client.authentication.k8s.io/v1beta1" + description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false +} + +variable "helm_manifest_experiment_enabled" { + type = bool + default = false + description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false +} + +locals { + kubeconfig_file_enabled = var.kubeconfig_file_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? 
format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + + # Eventually we might try to get this from an environment variable + kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version + + exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? [ + "--profile", var.kube_exec_auth_aws_profile + ] : [] + + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) + exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ + "--role-arn", local.kube_exec_auth_role_arn + ] : [] + + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") +} + +data "aws_eks_cluster_auth" "eks" { + count = local.kube_data_auth_enabled ? 1 : 0 + name = local.eks_cluster_id +} + +provider "helm" { + kubernetes { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } + } + experiments { + manifest = var.helm_manifest_experiment_enabled && module.this.enabled + } +} + +provider "kubernetes" { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? 
["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } +} diff --git a/modules/eks/datadog-agent/providers.tf b/modules/eks/datadog-agent/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/eks/datadog-agent/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/eks/datadog-agent/remote-state.tf b/modules/eks/datadog-agent/remote-state.tf new file mode 100644 index 000000000..c1ec8226d --- /dev/null +++ b/modules/eks/datadog-agent/remote-state.tf @@ -0,0 +1,8 @@ +module "eks" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.eks_component_name + + context = module.this.context +} diff --git a/modules/eks/datadog-agent/values.yaml b/modules/eks/datadog-agent/values.yaml new file mode 100644 index 000000000..b8215b2ab --- /dev/null +++ b/modules/eks/datadog-agent/values.yaml @@ -0,0 +1,117 @@ +registry: public.ecr.aws/datadog +datadog: + logLevel: INFO + ## If running on Bottlerocket OS, uncomment the following lines. + ## See https://docs.datadoghq.com/containers/kubernetes/distributions/?tab=helm#EKS + # criSocketPath: /run/dockershim.sock # Bottlerocket Only + # env: # Bottlerocket Only + # - name: DD_AUTOCONFIG_INCLUDE_FEATURES # Bottlerocket Only + # value: "containerd" # Bottlerocket Only + + ## kubeStateMetricsEnabled is false because the feature is obsolete (replaced by kubeStateMetricsCore). 
+ ## See https://github.com/DataDog/helm-charts/issues/415#issuecomment-943117608 + ## https://docs.datadoghq.com/integrations/kubernetes_state_core/?tab=helm + ## https://www.datadoghq.com/blog/kube-state-metrics-v2-monitoring-datadog/ + kubeStateMetricsEnabled: false + kubeStateMetricsCore: + enabled: true + collectVpaMetrics: true + collectCrdMetrics: true + collectEvents: true + leaderElection: true + remoteConfiguration: + enabled: true + logs: + enabled: true + containerCollectAll: true + containerCollectUsingFiles: true + apm: + enabled: true + socketEnabled: true + useSocketVolume: true + serviceMonitoring: + enabled: true + processAgent: + enabled: true + processCollection: true + systemProbe: + enableTCPQueueLength: true + enableOOMKill: true + collectDNSStats: true + enableConntrack: true + bpfDebug: false + orchestratorExplorer: + enabled: true + networkMonitoring: + enabled: false + clusterTagger: + collectKubernetesTags: true + clusterChecksRunner: + enabled: false + clusterChecks: + enabled: true + dogstatsd: + useHostPort: true + nonLocalTraffic: true + securityAgent: + runtime: + enabled: false + compliance: + enabled: true + helmCheck: + enabled: true + collectEvents: true +clusterAgent: + admissionController: + enabled: true + mutateUnlabelled: false + configMode: "hostip" + + enabled: true + # Maintain 2 cluster agents so that there is no interruption in metrics collection + # when the cluster agents' node is being deprovisioned. + replicas: 2 + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + ## without ndots: 2, DNS will try to resolve a DNS lookup 5 different ways + dnsConfig: + options: + - name: ndots + value: "2" + image: + pullPolicy: IfNotPresent + metricsProvider: + enabled: false + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 300m + memory: 512Mi +agents: + enabled: true + priorityClassName: "system-node-critical" + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + ## without ndots: 2, DNS will try to resolve a DNS lookup 5 different ways + dnsConfig: + options: + - name: ndots + value: "2" + # Per https://github.com/DataDog/helm-charts/blob/main/charts/datadog/README.md#configuration-required-for-amazon-linux-2-based-nodes + podSecurity: + apparmor: + enabled: false + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: eks.amazonaws.com/compute-type + operator: NotIn + values: + - fargate diff --git a/modules/datadog-agent/variables.tf b/modules/eks/datadog-agent/variables.tf similarity index 69% rename from modules/datadog-agent/variables.tf rename to modules/eks/datadog-agent/variables.tf index bdc7fa99d..89d16a61b 100644 --- a/modules/datadog-agent/variables.tf +++ b/modules/eks/datadog-agent/variables.tf @@ -3,24 +3,6 @@ variable "region" { description = "AWS Region" } -variable "secrets_store_type" { - type = string - description = "Secret store type for Datadog API and app keys. 
Valid values: `SSM`, `ASM`" - default = "SSM" -} - -variable "datadog_api_secret_key" { - type = string - description = "The key of the Datadog API secret" - default = "datadog/datadog_api_key" -} - -variable "datadog_app_secret_key" { - type = string - description = "The key of the Datadog Application secret" - default = "datadog/datadog_app_key" -} - variable "datadog_tags" { type = set(string) description = "List of static tags to attach to every metric, event and service check collected by the agent" @@ -56,3 +38,9 @@ variable "eks_component_name" { description = "The name of the EKS component. Used to get the remote state" default = "eks/eks" } + +variable "values" { + type = any + description = "Additional values to yamlencode as `helm_release` values." + default = {} +} diff --git a/modules/eks/datadog-agent/versions.tf b/modules/eks/datadog-agent/versions.tf new file mode 100644 index 000000000..b104e91ca --- /dev/null +++ b/modules/eks/datadog-agent/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.9.0" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.7" + } + utils = { + source = "cloudposse/utils" + version = ">= 1.10.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.14.0, != 2.21.0" + } + } +} diff --git a/modules/eks/echo-server/CHANGELOG.md b/modules/eks/echo-server/CHANGELOG.md new file mode 100644 index 000000000..5dc0fb54a --- /dev/null +++ b/modules/eks/echo-server/CHANGELOG.md @@ -0,0 +1,17 @@ +## Changes in PR #893, components version ~v1.337.0 + +- Moved `eks/echo-server` v1.147.0 to `/deprecated/eks/echo-server` for those who still need it and do not want to + switch. It may later become the basis for an example app or something similar. +- Removed dependency on and connection to the `eks/alb-controller-ingress-group` component +- Added liveness probe, and disabled logging of probe requests. Probe request logging can be restored by setting + `livenessProbeLogging: true` in `chart_values` +- This component no longer configures automatic redirects from HTTP to HTTPS. This is because for ALB controller, + setting that on one ingress sets it for all ingresses in the same IngressGroup, and it is a design goal that deploying + this component does not affect other Ingresses (with the obvious exception of possibly being the first to create the + Application Load Balancer). +- Removed from `chart_values`:`ingress.nginx.class` (was set to "nginx") and `ingress.alb.class` (was set to "alb"). + IngressClass should usually not be set, as this component is intended to be used to test the defaults, including the + default IngressClass. However, if you do want to set it, you can do so by setting `ingress.class` in `chart_values`. +- Removed the deprecated `kubernetes.io/ingress.class` annotation by default. It can be restored by setting + `ingress.use_ingress_class_annotation: true` in `chart_values`. IngressClass is now set using the preferred + `ingressClassName` field of the Ingress resource. 
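If you want to preserve the pre-upgrade behavior described above, the settings can be passed through `chart_values`. A minimal sketch (the key names are the ones given in this changelog; the IngressClass value is only an example):

```yaml
chart_values:
  # Restore logging of probe requests
  livenessProbeLogging: true
  ingress:
    # Normally left unset so the cluster's default IngressClass is exercised;
    # set it only if you really need to pin a specific class (example value).
    class: "nginx"
```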
diff --git a/modules/eks/echo-server/README.md b/modules/eks/echo-server/README.md
index dc94bec58..8ad731f57 100644
--- a/modules/eks/echo-server/README.md
+++ b/modules/eks/echo-server/README.md
@@ -1,28 +1,51 @@
+---
+tags:
+  - component/eks/echo-server
+  - layer/eks
+  - provider/aws
+  - provider/echo-server
+---
+
 # Component: `eks/echo-server`
 
-This is copied from [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/echo-server).
+This is copied from
+[cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/echo-server).
 
-This component installs the [Ealenn/Echo-Server](https://github.com/Ealenn/Echo-Server) to EKS clusters.
-The echo server is a server that sends it back to the client a JSON representation of all the data
-the server received, which is a combination of information sent by the client and information sent
-by the web server infrastructure. For further details, please consult the [Echo-Server documentation](https://ealenn.github.io/Echo-Server/).
+This component installs the [Ealenn/Echo-Server](https://github.com/Ealenn/Echo-Server) on EKS clusters. The echo server
+sends back to the client a JSON representation of all the data the server received, which is a combination of
+information sent by the client and information sent by the web server infrastructure. For further details, please
+consult the [Echo-Server documentation](https://ealenn.github.io/Echo-Server/).
 
 ## Prerequisites
 
-Echo server is intended to provide end-to-end testing of everything needed to deploy an application or service with a public HTTPS endpoint.
-Therefore, it requires several other components.
-At the moment, it supports 2 configurations:
+Echo server is intended to provide end-to-end testing of everything needed to deploy an application or service with a
+public HTTPS endpoint. It uses defaults where possible, such as using the default IngressClass, in order to verify that
+the defaults are sufficient for a typical application.
+
+In order to minimize the impact of the echo server on the rest of the cluster, it does not set any configuration that
+would affect other ingresses, such as WAF rules, logging, or redirecting HTTP to HTTPS. Those settings should be
+configured in the IngressClass where possible.
+
+Therefore, it requires several other components. At the moment, it supports 2 configurations:
 
 1. ALB with ACM Certificate
-   - AWS Load Balancer Controller (ALB) version 2.2.0 or later, with ACM certificate auto-discovery enabled
-   - Pre-provisioned ACM TLS certificate covering the provisioned host name (typically a wildcard certificate covering all hosts in the domain)
+
+- AWS Load Balancer Controller (ALB) version 2.2.0 or later, with ACM certificate auto-discovery enabled
+- A default IngressClass, which can be provisioned by the `alb-controller` component as part of deploying the
+  controller, or can be provisioned separately, for example by the `alb-controller-ingress-class` component.
+- Pre-provisioned ACM TLS certificate covering the provisioned host name (typically a wildcard certificate covering all
+  hosts in the domain)
+
 2. Nginx with Cert Manager Certificate
-   - Nginx (via `kubernetes/ingress-nginx` controller). We recommend `ingress-nginx` v1.1.0 or later, but `echo-server`
-     should work with any version that supports Ingress API version `networking.k8s.io/v1`.
-   - `jetstack/cert-manager` configured to automatically (via Ingress Shim, installed by default) generate TLS certificates via a Cluster Issuer
-     (by default, named `letsEncrypt-prod`).
+
+- Nginx (via `kubernetes/ingress-nginx` controller). We recommend `ingress-nginx` v1.1.0 or later, but `echo-server`
+  should work with any version that supports Ingress API version `networking.k8s.io/v1`.
+- `jetstack/cert-manager` configured to automatically (via Ingress Shim, installed by default) generate TLS certificates
+  via a Cluster Issuer (by default, named `letsEncrypt-prod`).
 
 In both configurations, it has these common requirements:
+
+- EKS component deployed, with component name specified in `eks_component_name` (defaults to "eks/cluster")
 - Kubernetes version 1.19 or later
 - Ingress API version `networking.k8s.io/v1`
 - [kubernetes-sigs/external-dns](https://github.com/kubernetes-sigs/external-dns)
@@ -31,10 +54,9 @@ In both configurations, it has these common requirements:
 ## Warnings
 
 A Terraform plan may fail to apply, giving a Kubernetes authentication failure. This is due to a known issue with
-Terraform and the Kubernetes provider. During the "plan" phase Terraform gets a short-lived Kubernetes
-authentication token and caches it, and then tries to use it during "apply". If the token has expired by
-the time you try to run "apply", the "apply" will fail. The workaround is to run `terraform apply -auto-approve` without
-a "plan" file.
+Terraform and the Kubernetes provider. During the "plan" phase Terraform gets a short-lived Kubernetes authentication
+token and caches it, and then tries to use it during "apply". If the token has expired by the time you try to run
+"apply", the "apply" will fail. The workaround is to run `terraform apply -auto-approve` without a "plan" file.
 
 ## Usage
 
@@ -42,6 +64,26 @@ a "plan" file.
 
 Use this in the catalog or use these variables to overwrite the catalog values.
 
+Set `ingress_type` to "alb" if using `alb-controller` or "nginx" if using `ingress-nginx`.
+
+Normally, you should not set the IngressClass or IngressGroup, as this component is intended to test the defaults.
+However, if you need to, set them in `chart_values`:
+
+```yaml
+chart_values:
+  ingress:
+    class: "other-ingress-class"
+    alb:
+      # IngressGroup is specific to alb-controller
+      group_name: "other-ingress-group"
+```
+
+Note that if you follow recommendations and do not set the ingress class name, the deployed Ingress will have the
+ingressClassName setting injected by the Ingress controller, set to the then-current default. This means that if you
+later change the default IngressClass, the Ingress will NOT be updated to use the new default. Furthermore, because of
+limitations in the Helm provider, this will not be detected as drift. You will need to destroy and re-deploy the echo
+server to update the Ingress to the new default.
+
 ```yaml
 components:
   terraform:
@@ -60,11 +102,15 @@ components:
         atomic: true
         cleanup_on_fail: true
 
-        ingress_type: "alb"
+        ingress_type: "alb" # or "nginx"
         # %[1]v is the tenant name, %[2]v is the stage name, %[3]v is the region name
         hostname_template: "echo.%[3]v.%[2]v.%[1]v.sample-domain.net"
 ```
 
+In rare cases where some ingress controllers do not support the `ingressClassName` field, you can restore the old
+`kubernetes.io/ingress.class` annotation by setting `ingress.use_ingress_class_annotation: true` in `chart_values`.
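+A hedged sketch of such an override, combining an explicit IngressClass with the legacy annotation (the class name
+"alb" is only an example; normally you would leave the class unset and rely on the cluster default):
+
+```yaml
+chart_values:
+  ingress:
+    # Explicit IngressClass; normally omitted so the cluster default is used
+    class: "alb"
+    # Emit the deprecated kubernetes.io/ingress.class annotation
+    # instead of the ingressClassName field on the Ingress
+    use_ingress_class_annotation: true
+```
+
+Per the chart template in this PR, the annotation is only rendered when `ingress.class` is also set.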
+ + ## Requirements @@ -73,7 +119,7 @@ components: | [terraform](#requirement\_terraform) | >= 1.0.0 | | [aws](#requirement\_aws) | >= 4.0 | | [helm](#requirement\_helm) | >= 2.0 | -| [kubernetes](#requirement\_kubernetes) | >= 2.7.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.7.1, != 2.21.0 | ## Providers @@ -85,8 +131,8 @@ components: | Name | Source | Version | |------|--------|---------| -| [echo\_server](#module\_echo\_server) | cloudposse/helm-release/aws | 0.7.0 | -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.3.1 | +| [echo\_server](#module\_echo\_server) | cloudposse/helm-release/aws | 0.10.1 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -94,9 +140,7 @@ components: | Name | Type | |------|------| -| [aws_eks_cluster.kubernetes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | | [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_eks_cluster_auth.kubernetes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | ## Inputs @@ -116,11 +160,9 @@ components: | [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `true` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | | [hostname\_template](#input\_hostname\_template) | The `format()` string to use to generate the hostname via `format(var.hostname_template, var.tenant, var.stage, var.environment)`"
Typically something like `"echo.%[3]v.%[2]v.example.com"`. | `string` | n/a | yes | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [ingress\_type](#input\_ingress\_type) | Set to 'nginx' to create an ingress resource relying on an NGiNX backend for the echo-server service. Set to 'alb' to create an ingress resource relying on an AWS ALB backend for the echo-server service. Leave blank to not create any ingress for the echo-server service. | `string` | `null` | no | | [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | | [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | @@ -128,7 +170,8 @@ components: | [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | | [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | -| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | | [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | | [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | | [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | @@ -153,8 +196,11 @@ components: | Name | Description | |------|-------------| +| [hostname](#output\_hostname) | Hostname of the deployed echo server | | [metadata](#output\_metadata) | Block status of the deployed release | + ## References -* https://github.com/Ealenn/Echo-Server + +- https://github.com/Ealenn/Echo-Server diff --git a/modules/eks/echo-server/charts/echo-server/Chart.yaml b/modules/eks/echo-server/charts/echo-server/Chart.yaml index 03519d53b..8fae0334e 100644 --- a/modules/eks/echo-server/charts/echo-server/Chart.yaml +++ b/modules/eks/echo-server/charts/echo-server/Chart.yaml @@ -15,10 +15,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.2.0 +version: 0.4.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "0.2.0" +appVersion: "0.8.0" diff --git a/modules/eks/echo-server/charts/echo-server/templates/deployment.yaml b/modules/eks/echo-server/charts/echo-server/templates/deployment.yaml index 1e85f1c36..1eade38de 100644 --- a/modules/eks/echo-server/charts/echo-server/templates/deployment.yaml +++ b/modules/eks/echo-server/charts/echo-server/templates/deployment.yaml @@ -24,7 +24,35 @@ spec: args: # Disable the feature that turns the echo server into a file browser on the server (security risk) - "--enable:file=false" + {{- if eq (printf "%v" .Values.livenessProbeLogging) "false" }} + - "--logs:ignore:ping=true" + {{- end }} ports: - name: http containerPort: 80 protocol: TCP + livenessProbe: + httpGet: + port: http + path: /ping + httpHeaders: + - name: x-echo-code + value: "200" + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 2 + failureThreshold: 3 + successThreshold: 1 + {{- with index .Values "resources" }} + resources: + {{- with index . "limits" }} + limits: + cpu: {{ index . "cpu" | default "50m" }} + memory: {{ index . "memory" | default "128Mi" }} + {{- end }} + {{- with index . "requests" }} + requests: + cpu: {{ index . "cpu" | default "50m" }} + memory: {{ index . 
"memory" | default "128Mi" }} + {{- end }} + {{- end }} diff --git a/modules/eks/echo-server/charts/echo-server/templates/ingress.yaml b/modules/eks/echo-server/charts/echo-server/templates/ingress.yaml index f5e6473fa..703af694c 100644 --- a/modules/eks/echo-server/charts/echo-server/templates/ingress.yaml +++ b/modules/eks/echo-server/charts/echo-server/templates/ingress.yaml @@ -2,46 +2,57 @@ {{- $fullName := include "echo-server.fullname" . -}} {{- $svcName := include "echo-server.name" . -}} {{- $svcPort := .Values.service.port -}} - {{- $nginxTlsEnabled := and (eq (printf "%v" .Values.ingress.nginx.enabled) "true") (eq (printf "%v" .Values.tlsEnabled) "true")}} + {{- $nginxTlsEnabled := and (eq (printf "%v" .Values.ingress.nginx.enabled) "true") (eq (printf "%v" .Values.tlsEnabled) "true") }} apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: {{ $fullName }} annotations: - {{- if eq (printf "%v" .Values.ingress.nginx.enabled) "true" }} - {{- if (index .Values.ingress.nginx "tls_certificate_cluster_issuer") }} - cert-manager.io/cluster-issuer: {{ .Values.ingress.nginx.tls_certificate_cluster_issuer }} - {{- end }} - {{- else if eq (printf "%v" .Values.ingress.alb.enabled) "true" }} - alb.ingress.kubernetes.io/target-type: 'ip' - {{- if eq (printf "%v" .Values.ingress.alb.ssl_redirect.enabled) "true" }} - alb.ingress.kubernetes.io/ssl-redirect: '{{ .Values.ingress.alb.ssl_redirect.port }}' + {{- with and (eq (printf "%v" .Values.ingress.use_ingress_class_annotation) "true") (index .Values.ingress "class") }} + kubernetes.io/ingress.class: {{ . }} + {{- end }} + {{- with and $nginxTlsEnabled (index .Values.ingress.nginx "tls_certificate_cluster_issuer") }} + cert-manager.io/cluster-issuer: {{ . }} + {{- end }} + {{- if eq (printf "%v" .Values.ingress.alb.enabled) "true" }} + alb.ingress.kubernetes.io/healthcheck-path: /ping + {{- with index .Values.ingress.alb "group_name" }} + alb.ingress.kubernetes.io/group.name: {{ . }} {{- end }} {{- if eq (printf "%v" .Values.tlsEnabled) "true" }} alb.ingress.kubernetes.io/backend-protocol: HTTP - alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80},{"HTTPS":443}]' + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP":80},{"HTTPS":443}]' {{- else }} - alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}]' + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP":80}]' {{- end }} + # See https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.5/guide/ingress/annotations/#target-type + alb.ingress.kubernetes.io/target-type: {{ if eq (printf "%v" .Values.service.type) "NodePort" -}} "instance" {{- else -}} "ip" {{- end }} {{- end }} labels: {{- include "echo-server.labels" . | nindent 4 }} spec: + # If not specified, the Ingress controller will insert the ingressClassName field + # when creating the Ingress resource, setting ingressClassName to the name of the then-default IngressClass. + {{- with and (ne (printf "%v" .Values.ingress.use_ingress_class_annotation) "true") (index .Values.ingress "class") }} + ingressClassName: {{ . }} + {{- end }} + # ALB controller will auto-discover the ACM certificate based on rules[].host + # Nginx needs explicit configuration of location of cert-manager TLS certificate {{- if $nginxTlsEnabled }} tls: # < placing a host in the TLS config will indicate a certificate should be created - - hosts: - - {{ .Values.ingress.hostname }} - secretName: {{ $svcName }}-cert # < cert-manager will store the created certificate in this secret. 
+ - hosts: + - {{ .Values.ingress.hostname }} + secretName: {{ $svcName }}-cert # < cert-manager will store the created certificate in this secret. {{- end }} rules: - - host: {{ .Values.ingress.hostname }} - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: {{ $svcName }} - port: - number: {{ $svcPort }} + - host: {{ .Values.ingress.hostname }} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} {{- end }} diff --git a/modules/eks/echo-server/charts/echo-server/values.yaml b/modules/eks/echo-server/charts/echo-server/values.yaml index 777654c4d..0f5e270a2 100644 --- a/modules/eks/echo-server/charts/echo-server/values.yaml +++ b/modules/eks/echo-server/charts/echo-server/values.yaml @@ -8,79 +8,48 @@ image: # image.repository -- https://hub.docker.com/r/ealen/echo-server repository: ealen/echo-server # image.tag -- https://github.com/Ealenn/Echo-Server/releases - tag: 0.4.2 - pullPolicy: Always + tag: 0.8.12 + pullPolicy: IfNotPresent #imagePullSecrets: [] nameOverride: "" #fullnameOverride: "" -#serviceAccount: -# # Specifies whether a service account should be created -# create: true -# # Annotations to add to the service account -# annotations: {} -# # The name of the service account to use. -# # If not set and create is true, a name is generated using the fullname template -# name: "" - -#podAnnotations: {} - -#podSecurityContext: {} -# # fsGroup: 2000 - -#securityContext: {} -# # capabilities: -# # drop: -# # - ALL -# # readOnlyRootFilesystem: true -# # runAsNonRoot: true -# # runAsUser: 1000 service: type: ClusterIP port: 80 tlsEnabled: true +# If livenessProbeLogging is false, requests to /ping will not be logged +livenessProbeLogging: false ingress: + ## Allow class to be specified, but use default class (not class named "default") by default + # class: default + + # Use deprecated `kubernetes.io/ingress.class` annotation + use_ingress_class_annotation: false nginx: # ingress.nginx.enabled -- Enable NGiNX ingress enabled: false - # annotation values - ## kubernetes.io/ingress.class: - class: "nginx" - ## cert-manager.io/cluster-issuer: tls_certificate_cluster_issuer: "letsencrypt-prod" alb: - enabled: true - # annotation values - ## kubernetes.io/ingress.class: - class: "alb" - ## alb.ingress.kubernetes.io/load-balancer-name: - ### load_balancer_name: "k8s-common" - ## alb.ingress.kubernetes.io/group.name: - ### group_name: "common" - ssl_redirect: - enabled: true - ## alb.ingress.kubernetes.io/ssl-redirect: - port: 443 - access_logs: - enabled: false - ## s3_bucket_name: "acme-ue2-prod-eks-cluster-alb-access-logs" - s3_bucket_prefix: "echo-server" + enabled: false + ## Allow group to be specified, but use default by default + # group_name: common -#resources: {} -# # We usually recommend not to specify default resources and to leave this as a conscious -# # choice for the user. This also increases chances charts run on environments with little -# # resources, such as Minikube. If you do want to specify resources, uncomment the following -# # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# # limits: -# # cpu: 100m -# # memory: 128Mi -# # requests: -# # cpu: 100m -# # memory: 128Mi + # Do NOT allow SSL redirect to be specified, because that affects other ingresses. + # "Once defined on a single Ingress, it impacts every Ingress within IngressGroup." 
+ # See https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.6/guide/ingress/annotations/#ssl-redirect + +resources: + limits: + cpu: 50m + memory: 128Mi +# requests: +# cpu: 50m +# memory: 128Mi autoscaling: enabled: false @@ -88,9 +57,3 @@ autoscaling: #maxReplicas: 100 #targetCPUUtilizationPercentage: 80 #targetMemoryUtilizationPercentage: 80 - -#nodeSelector: {} - -#tolerations: [] - -#affinity: {} diff --git a/modules/eks/echo-server/main.tf b/modules/eks/echo-server/main.tf index c11f21f9c..5d24c2681 100644 --- a/modules/eks/echo-server/main.tf +++ b/modules/eks/echo-server/main.tf @@ -1,12 +1,13 @@ locals { - enabled = module.this.enabled ingress_nginx_enabled = var.ingress_type == "nginx" ? true : false ingress_alb_enabled = var.ingress_type == "alb" ? true : false + + hostname = module.this.enabled ? format(var.hostname_template, var.tenant, var.stage, var.environment) : null } module "echo_server" { source = "cloudposse/helm-release/aws" - version = "0.7.0" + version = "0.10.1" name = module.this.name chart = "${path.module}/charts/echo-server" @@ -30,7 +31,7 @@ module "echo_server" { set = [ { name = "ingress.hostname" - value = format(var.hostname_template, var.tenant, var.stage, var.environment) + value = local.hostname type = "auto" }, { @@ -52,4 +53,3 @@ module "echo_server" { context = module.this.context } - diff --git a/modules/eks/echo-server/outputs.tf b/modules/eks/echo-server/outputs.tf index 3199457ce..05893b697 100644 --- a/modules/eks/echo-server/outputs.tf +++ b/modules/eks/echo-server/outputs.tf @@ -2,3 +2,8 @@ output "metadata" { value = try(one(module.echo_server.metadata), null) description = "Block status of the deployed release" } + +output "hostname" { + value = local.hostname + description = "Hostname of the deployed echo server" +} diff --git a/modules/eks/echo-server/provider-helm.tf b/modules/eks/echo-server/provider-helm.tf index 20e4d3837..91cc7f6d4 100644 --- a/modules/eks/echo-server/provider-helm.tf +++ b/modules/eks/echo-server/provider-helm.tf @@ -2,6 +2,12 @@ # # This file is a drop-in to provide a helm provider. # +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# # All the following variables are just about configuring the Kubernetes provider # to be able to modify EKS cluster. The reason there are so many options is # because at various times, each one of them has had problems, so we give you a choice. @@ -15,18 +21,35 @@ variable "kubeconfig_file_enabled" { type = bool default = false description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false } variable "kubeconfig_file" { type = string default = "" description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false } variable "kubeconfig_context" { type = string default = "" - description = "Context to choose from the Kubernetes kube config file" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. 
+ EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false } variable "kube_data_auth_enabled" { @@ -36,6 +59,7 @@ variable "kube_data_auth_enabled" { If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_enabled" { @@ -45,48 +69,62 @@ variable "kube_exec_auth_enabled" { If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_role_arn" { type = string default = "" description = "The role ARN for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_role_arn_enabled" { type = bool default = true description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false } variable "kube_exec_auth_aws_profile" { type = string default = "" description = "The AWS config profile for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_aws_profile_enabled" { type = bool default = false description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false } variable "kubeconfig_exec_auth_api_version" { type = string default = "client.authentication.k8s.io/v1beta1" description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false } variable "helm_manifest_experiment_enabled" { type = bool - default = true + default = false description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false } locals { kubeconfig_file_enabled = var.kubeconfig_file_enabled - kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled - kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled # Eventually we might try to get this from an environment variable kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version @@ -95,14 +133,17 @@ locals { "--profile", var.kube_exec_auth_aws_profile ] : [] - kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, var.import_role_arn, module.iam_roles.terraform_role_arn) + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? 
[ "--role-arn", local.kube_exec_auth_role_arn ] : [] - certificate_authority_data = module.eks.outputs.eks_cluster_certificate_authority_data - eks_cluster_id = module.eks.outputs.eks_cluster_id - eks_cluster_endpoint = module.eks.outputs.eks_cluster_endpoint + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") } data "aws_eks_cluster_auth" "eks" { @@ -113,15 +154,16 @@ data "aws_eks_cluster_auth" "eks" { provider "helm" { kubernetes { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) - token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" @@ -132,21 +174,22 @@ provider "helm" { } } experiments { - manifest = var.helm_manifest_experiment_enabled + manifest = var.helm_manifest_experiment_enabled && module.this.enabled } } provider "kubernetes" { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) - token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. 
+ config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" diff --git a/modules/eks/echo-server/providers.tf b/modules/eks/echo-server/providers.tf index 2775903d2..89ed50a98 100644 --- a/modules/eks/echo-server/providers.tf +++ b/modules/eks/echo-server/providers.tf @@ -1,11 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -14,27 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} - -data "aws_eks_cluster" "kubernetes" { - count = local.enabled ? 1 : 0 - - name = module.eks.outputs.eks_cluster_id -} - -data "aws_eks_cluster_auth" "kubernetes" { - count = local.enabled ? 
1 : 0 - - name = module.eks.outputs.eks_cluster_id -} diff --git a/modules/eks/echo-server/remote-state.tf b/modules/eks/echo-server/remote-state.tf index 90c6ab1a8..c1ec8226d 100644 --- a/modules/eks/echo-server/remote-state.tf +++ b/modules/eks/echo-server/remote-state.tf @@ -1,6 +1,6 @@ module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" + version = "1.5.0" component = var.eks_component_name diff --git a/modules/eks/echo-server/versions.tf b/modules/eks/echo-server/versions.tf index b7a1a1986..fb8857fab 100644 --- a/modules/eks/echo-server/versions.tf +++ b/modules/eks/echo-server/versions.tf @@ -12,7 +12,7 @@ terraform { } kubernetes = { source = "hashicorp/kubernetes" - version = ">= 2.7.1" + version = ">= 2.7.1, != 2.21.0" } } } diff --git a/modules/eks/efs-controller/default.auto.tfvars b/modules/eks/efs-controller/default.auto.tfvars deleted file mode 100644 index 5b0464c79..000000000 --- a/modules/eks/efs-controller/default.auto.tfvars +++ /dev/null @@ -1,5 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false - -name = "efs-controller" diff --git a/modules/eks/efs/default.auto.tfvars b/modules/eks/efs/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/eks/efs/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/eks/eks-without-spotinst/default.auto.tfvars b/modules/eks/eks-without-spotinst/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/eks/eks-without-spotinst/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/eks/external-dns/README.md b/modules/eks/external-dns/README.md index 823d5782f..0b8f02345 100644 --- a/modules/eks/external-dns/README.md +++ b/modules/eks/external-dns/README.md @@ -1,6 +1,16 @@ +--- +tags: + - component/eks/external-dns + - layer/eks + - provider/aws + - provider/helm +--- + # Component: `eks/external-dns` -This component creates a Helm deployment for [external-dns](https://github.com/bitnami/bitnami-docker-external-dns) on a Kubernetes cluster. [external-dns](https://github.com/bitnami/bitnami-docker-external-dns) is a Kubernetes addon that configures public DNS servers with information about exposed Kubernetes services to make them discoverable. +This component creates a Helm deployment for [external-dns](https://github.com/bitnami/bitnami-docker-external-dns) on a +Kubernetes cluster. [external-dns](https://github.com/bitnami/bitnami-docker-external-dns) is a Kubernetes addon that +configures public DNS servers with information about exposed Kubernetes services to make them discoverable. ## Usage @@ -25,17 +35,21 @@ components: name: external-dns chart: external-dns chart_repository: https://charts.bitnami.com/bitnami - chart_version: "6.7.5" + chart_version: "6.33.0" create_namespace: true kubernetes_namespace: external-dns - - # Resources - limit_cpu: "200m" - limit_memory: "256Mi" - request_cpu: "100m" - request_memory: "128Mi" - + resources: + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 100m + memory: 128Mi + # Set this to a unique value to avoid conflicts with other external-dns instances managing the same zones. + # For example, when using blue-green deployment pattern to update EKS cluster. + txt_prefix: "" # You can use `chart_values` to set any other chart options. 
Treat `chart_values` as the root of the doc. + # See documentation for latest chart version and list of chart_values: https://artifacthub.io/packages/helm/bitnami/external-dns # # # For example # --- @@ -43,105 +57,115 @@ components: # aws: # batchChangeSize: 1000 chart_values: {} + # Extra hosted zones to lookup and support by component name + dns_components: + - component: dns-primary + - component: dns-delegated + - component: dns-delegated/abc + - component: dns-delegated/123 + environment: "gbl" # Optional (default "gbl") ``` + ## Requirements -| Name | Version | -|------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | >= 4.9.0 | -| [helm](#requirement\_helm) | >= 2.0 | -| [kubernetes](#requirement\_kubernetes) | >= 2.7.1 | +| Name | Version | +|------------------------------------------------------------------------------|---------------------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | +| [helm](#requirement\_helm) | >= 2.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.7.1, != 2.21.0 | ## Providers -| Name | Version | -|------|---------| +| Name | Version | +|---------------------------------------------------|----------| | [aws](#provider\_aws) | >= 4.9.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [dns\_gbl\_delegated](#module\_dns\_gbl\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.3.1 | -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.3.1 | -| [external\_dns](#module\_external\_dns) | cloudposse/helm-release/aws | 0.7.0 | +| [additional\_dns\_components](#module\_additional\_dns\_components) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [dns\_gbl\_delegated](#module\_dns\_gbl\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [dns\_gbl\_primary](#module\_dns\_gbl\_primary) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [external\_dns](#module\_external\_dns) | cloudposse/helm-release/aws | 0.10.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources -| Name | Type | -|------|------| +| Name | Type | +|-----------------------------------------------------------------------------------------------------------------------------|-------------| | [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | +| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | ## Inputs -| Name | Description | Type | Default | Required | -|------|-------------|------|---------|:--------:| -| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | -| [atomic](#input\_atomic) | If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used. | `bool` | `true` | no | -| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | -| [chart](#input\_chart) | Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended. | `string` | n/a | yes | -| [chart\_description](#input\_chart\_description) | Set release description attribute (visible in the history). | `string` | `null` | no | -| [chart\_repository](#input\_chart\_repository) | Repository URL where to locate the requested chart. | `string` | n/a | yes | -| [chart\_values](#input\_chart\_values) | Addition map values to yamlencode as `helm_release` values. | `any` | `{}` | no | -| [chart\_version](#input\_chart\_version) | Specify the exact chart version to install. If this is not specified, the latest version is installed. | `string` | `null` | no | -| [cleanup\_on\_fail](#input\_cleanup\_on\_fail) | Allow deletion of new resources created in this upgrade when upgrade fails. | `bool` | `true` | no | -| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | -| [crd\_enabled](#input\_crd\_enabled) | Install and use the integrated DNSEndpoint CRD. | `bool` | `false` | no | -| [create\_namespace](#input\_create\_namespace) | Create the namespace if it does not yet exist. Defaults to `false`. | `bool` | `null` | no | -| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | -| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | -| [dns\_gbl\_delegated\_environment\_name](#input\_dns\_gbl\_delegated\_environment\_name) | The name of the environment where global `dns_delegated` is provisioned | `string` | `"gbl"` | no | -| [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | -| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | -| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `true` | no | -| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | -| [istio\_enabled](#input\_istio\_enabled) | Add istio gateways to monitored sources. | `bool` | `false` | no | -| [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | -| [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | -| [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | -| [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | -| [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | -| [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | -| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | -| [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | -| [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | -| [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | -| [kubernetes\_namespace](#input\_kubernetes\_namespace) | The namespace to install the release into. | `string` | n/a | yes | -| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | -| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | -| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | -| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | -| [metrics\_enabled](#input\_metrics\_enabled) | Whether or not to enable metrics in the helm chart. | `bool` | `false` | no | -| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | -| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | -| [policy](#input\_policy) | Modify how DNS records are synchronized between sources and providers (options: sync, upsert-only) | `string` | `"sync"` | no | -| [publish\_internal\_services](#input\_publish\_internal\_services) | Allow external-dns to publish DNS records for ClusterIP services | `bool` | `true` | no | -| [rbac\_enabled](#input\_rbac\_enabled) | Service Account for pods. | `bool` | `true` | no | -| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | -| [region](#input\_region) | AWS Region. | `string` | n/a | yes | -| [resources](#input\_resources) | The cpu and memory of the deployment's limits and requests. |
object({
limits = object({
cpu = string
memory = string
})
requests = object({
cpu = string
memory = string
})
})
|
{
"limits": {
"cpu": "200m",
"memory": "256Mi"
},
"requests": {
"cpu": "100m",
"memory": "128Mi"
}
}
| no | -| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | -| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | -| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | -| [timeout](#input\_timeout) | Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds | `number` | `null` | no | -| [txt\_prefix](#input\_txt\_prefix) | Prefix to create a TXT record with a name following the pattern prefix.. | `string` | `"external-dns"` | no | -| [wait](#input\_wait) | Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`. | `bool` | `null` | no | +| Name | Description | Type | Default | Required | +|----------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [atomic](#input\_atomic) | If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used. | `bool` | `true` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [chart](#input\_chart) | Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended. | `string` | n/a | yes | +| [chart\_description](#input\_chart\_description) | Set release description attribute (visible in the history). | `string` | `null` | no | +| [chart\_repository](#input\_chart\_repository) | Repository URL where to locate the requested chart. | `string` | n/a | yes | +| [chart\_values](#input\_chart\_values) | Addition map values to yamlencode as `helm_release` values. | `any` | `{}` | no | +| [chart\_version](#input\_chart\_version) | Specify the exact chart version to install. If this is not specified, the latest version is installed. | `string` | `null` | no | +| [cleanup\_on\_fail](#input\_cleanup\_on\_fail) | Allow deletion of new resources created in this upgrade when upgrade fails. | `bool` | `true` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [crd\_enabled](#input\_crd\_enabled) | Install and use the integrated DNSEndpoint CRD. | `bool` | `false` | no | +| [create\_namespace](#input\_create\_namespace) | Create the namespace if it does not yet exist. Defaults to `false`. | `bool` | `null` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [dns\_gbl\_delegated\_environment\_name](#input\_dns\_gbl\_delegated\_environment\_name) | The name of the environment where global `dns_delegated` is provisioned | `string` | `"gbl"` | no | +| [dns\_gbl\_primary\_environment\_name](#input\_dns\_gbl\_primary\_environment\_name) | The name of the environment where global `dns_primary` is provisioned | `string` | `"gbl"` | no | +| [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can be seen in the plan | `bool` | `false` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [istio\_enabled](#input\_istio\_enabled) | Add istio gateways to monitored sources. | `bool` | `false` | no | +| [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | +| [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | +| [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | +| [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | +| [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | +| [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | +| [kubernetes\_namespace](#input\_kubernetes\_namespace) | The namespace to install the release into. | `string` | n/a | yes | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [metrics\_enabled](#input\_metrics\_enabled) | Whether or not to enable metrics in the helm chart. | `bool` | `false` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [policy](#input\_policy) | Modify how DNS records are synchronized between sources and providers (options: sync, upsert-only) | `string` | `"sync"` | no | +| [publish\_internal\_services](#input\_publish\_internal\_services) | Allow external-dns to publish DNS records for ClusterIP services | `bool` | `true` | no | +| [rbac\_enabled](#input\_rbac\_enabled) | Service Account for pods. | `bool` | `true` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region. | `string` | n/a | yes | +| [resources](#input\_resources) | The cpu and memory of the deployment's limits and requests. |
object({
limits = object({
cpu = string
memory = string
})
requests = object({
cpu = string
memory = string
})
})
|
{
"limits": {
"cpu": "200m",
"memory": "256Mi"
},
"requests": {
"cpu": "100m",
"memory": "128Mi"
}
}
| no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [timeout](#input\_timeout) | Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds | `number` | `null` | no | +| [txt\_prefix](#input\_txt\_prefix) | Prefix to create a TXT record with a name following the pattern prefix.``. | `string` | `"external-dns"` | no | +| [wait](#input\_wait) | Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`. | `bool` | `null` | no | ## Outputs -| Name | Description | -|------|-------------| +| Name | Description | +|--------------------------------------------------------------|--------------------------------------| | [metadata](#output\_metadata) | Block status of the deployed release | + ## References diff --git a/modules/eks/external-dns/main.tf b/modules/eks/external-dns/main.tf index 9292bd032..058aa7ccf 100644 --- a/modules/eks/external-dns/main.tf +++ b/modules/eks/external-dns/main.tf @@ -8,7 +8,9 @@ locals { txt_owner = var.txt_prefix != "" ? format(module.this.tenant != null ? "%[1]s-%[2]s-%[3]s-%[4]s" : "%[1]s-%[2]s-%[4]s", var.txt_prefix, module.this.environment, module.this.tenant, module.this.stage) : "" txt_prefix = var.txt_prefix != "" ? format("%s-", local.txt_owner) : "" zone_ids = compact(concat( - values(module.dns_gbl_delegated.outputs.zones)[*].zone_id + values(module.dns_gbl_delegated.outputs.zones)[*].zone_id, + values(module.dns_gbl_primary.outputs.zones)[*].zone_id, + flatten([for k, v in module.additional_dns_components : [for i, j in v.outputs.zones : j.zone_id]]) )) } @@ -18,7 +20,7 @@ data "aws_partition" "current" { module "external_dns" { source = "cloudposse/helm-release/aws" - version = "0.7.0" + version = "0.10.0" name = module.this.name chart = var.chart @@ -96,7 +98,7 @@ module "external_dns" { publishInternalServices = var.publish_internal_services txtOwnerId = local.txt_owner txtPrefix = local.txt_prefix - source = local.sources + sources = local.sources }), # hardcoded values file("${path.module}/resources/values.yaml"), diff --git a/modules/eks/external-dns/provider-helm.tf b/modules/eks/external-dns/provider-helm.tf index 9bb5edb6f..91cc7f6d4 100644 --- a/modules/eks/external-dns/provider-helm.tf +++ b/modules/eks/external-dns/provider-helm.tf @@ -21,18 +21,35 @@ variable "kubeconfig_file_enabled" { type = bool default = false description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false } variable "kubeconfig_file" { type = string default = "" description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false } variable "kubeconfig_context" { type = string default = "" - description = "Context to choose from the Kubernetes kube config file" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. 
+ EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false } variable "kube_data_auth_enabled" { @@ -42,6 +59,7 @@ variable "kube_data_auth_enabled" { If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_enabled" { @@ -51,48 +69,62 @@ variable "kube_exec_auth_enabled" { If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_role_arn" { type = string default = "" description = "The role ARN for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_role_arn_enabled" { type = bool default = true description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false } variable "kube_exec_auth_aws_profile" { type = string default = "" description = "The AWS config profile for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_aws_profile_enabled" { type = bool default = false description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false } variable "kubeconfig_exec_auth_api_version" { type = string default = "client.authentication.k8s.io/v1beta1" description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false } variable "helm_manifest_experiment_enabled" { type = bool - default = true + default = false description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false } locals { kubeconfig_file_enabled = var.kubeconfig_file_enabled - kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled - kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled # Eventually we might try to get this from an environment variable kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version @@ -101,16 +133,17 @@ locals { "--profile", var.kube_exec_auth_aws_profile ] : [] - kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, var.import_role_arn, module.iam_roles.terraform_role_arn) + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? 
[ "--role-arn", local.kube_exec_auth_role_arn ] : [] # Provide dummy configuration for the case where the EKS cluster is not available. - certificate_authority_data = try(module.eks.outputs.eks_cluster_certificate_authority_data, "") + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") - eks_cluster_endpoint = try(module.eks.outputs.eks_cluster_endpoint, "") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") } data "aws_eks_cluster_auth" "eks" { @@ -121,15 +154,16 @@ data "aws_eks_cluster_auth" "eks" { provider "helm" { kubernetes { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) + cluster_ca_certificate = local.cluster_ca_certificate token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" @@ -146,15 +180,16 @@ provider "helm" { provider "kubernetes" { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) + cluster_ca_certificate = local.cluster_ca_certificate token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? 
["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" diff --git a/modules/eks/external-dns/providers.tf b/modules/eks/external-dns/providers.tf index c2419aabb..89ed50a98 100644 --- a/modules/eks/external-dns/providers.tf +++ b/modules/eks/external-dns/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/eks/external-dns/remote-state.tf b/modules/eks/external-dns/remote-state.tf index a6d442848..9f15458c7 100644 --- a/modules/eks/external-dns/remote-state.tf +++ b/modules/eks/external-dns/remote-state.tf @@ -1,6 +1,6 @@ module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" + version = "1.5.0" component = var.eks_component_name @@ -9,7 +9,7 @@ module "eks" { module "dns_gbl_delegated" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" + version = "1.5.0" component = "dns-delegated" environment = var.dns_gbl_delegated_environment_name @@ -20,3 +20,30 @@ module "dns_gbl_delegated" { zones = {} } } + +module "dns_gbl_primary" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "dns-primary" + environment = var.dns_gbl_primary_environment_name + + context = module.this.context + + ignore_errors = true + + defaults = { + zones = {} + } +} + +module "additional_dns_components" { + for_each = { for obj in var.dns_components : obj.component => obj } + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = each.value.component + environment = coalesce(each.value.environment, "gbl") + + context = module.this.context +} diff --git a/modules/eks/external-dns/variables.tf b/modules/eks/external-dns/variables.tf index a63780e4a..8689b5530 100644 --- a/modules/eks/external-dns/variables.tf +++ b/modules/eks/external-dns/variables.tf @@ -99,7 +99,7 @@ variable "chart_values" { variable "txt_prefix" { type = string default = "external-dns" - description = "Prefix to create a TXT record with a name following the pattern prefix.." + description = "Prefix to create a TXT record with a name following the pattern prefix.``." 
} variable "crd_enabled" { @@ -126,6 +126,22 @@ variable "dns_gbl_delegated_environment_name" { default = "gbl" } +variable "dns_gbl_primary_environment_name" { + type = string + description = "The name of the environment where global `dns_primary` is provisioned" + default = "gbl" +} + + +variable "dns_components" { + type = list(object({ + component = string, + environment = optional(string) + })) + description = "A list of additional DNS components to search for ZoneIDs" + default = [] +} + variable "publish_internal_services" { type = bool description = "Allow external-dns to publish DNS records for ClusterIP services" diff --git a/modules/eks/external-dns/versions.tf b/modules/eks/external-dns/versions.tf index c8087b1b8..61ea676a2 100644 --- a/modules/eks/external-dns/versions.tf +++ b/modules/eks/external-dns/versions.tf @@ -12,7 +12,7 @@ terraform { } kubernetes = { source = "hashicorp/kubernetes" - version = ">= 2.7.1" + version = ">= 2.7.1, != 2.21.0" } } } diff --git a/modules/eks/external-secrets-operator/CHANGELOG.md b/modules/eks/external-secrets-operator/CHANGELOG.md new file mode 100644 index 000000000..2a073f4d6 --- /dev/null +++ b/modules/eks/external-secrets-operator/CHANGELOG.md @@ -0,0 +1,7 @@ +## Components PR [[eks/external-secrets-operator] Set default chart](https://github.com/cloudposse/terraform-aws-components/pull/856) + +This is a bug fix and feature enhancement update. No actions necessary to upgrade. + +## Fixes + +- Set default chart diff --git a/modules/eks/external-secrets-operator/README.md b/modules/eks/external-secrets-operator/README.md new file mode 100644 index 000000000..413d201fa --- /dev/null +++ b/modules/eks/external-secrets-operator/README.md @@ -0,0 +1,207 @@ +--- +tags: + - component/eks/external-secrets-operator + - layer/eks + - provider/aws + - provider/helm +--- + +# Component: `eks/external-secrets-operator` + +This component (ESO) is used to create an external `SecretStore` configured to synchronize secrets from AWS SSM +Parameter store as Kubernetes Secrets within the cluster. Per the operator pattern, the `external-secret-operator` pods +will watch for any `ExternalSecret` resources which reference the `SecretStore` to pull secrets from. + +In practice, this means apps will define an `ExternalSecret` that pulls all env into a single secret as part of a helm +chart; e.g.: + +``` +# Part of the charts in `/releases + +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: app-secrets +spec: + refreshInterval: 30s + secretStoreRef: + name: "secret-store-parameter-store" # Must match name of the Cluster Secret Store created by this component + kind: ClusterSecretStore + target: + creationPolicy: Owner + name: app-secrets + dataFrom: + - find: + name: + regexp: "^/app/" # Match the path prefix of your service + rewrite: + - regexp: + source: "/app/(.*)" # Remove the path prefix of your service from the name before creating the envars + target: "$1" +``` + +This component assumes secrets are prefixed by "service" in parameter store (e.g. `/app/my_secret`). The `SecretStore`. +The component is designed to pull secrets from a `path` prefix (defaulting to `"app"`). This should work nicely along +`chamber` which uses this same path (called a "service" in Chamber). For example, developers should store keys like so. + +```bash +assume-role acme-platform-gbl-sandbox-admin +chamber write app MY_KEY my-value +``` + +See `docs/recipes.md` for more information on managing secrets. 
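
For illustration only (an editorial sketch, not part of this PR or the upstream chart): once the `ExternalSecret` above has synced, a workload consumes the resulting `app-secrets` Kubernetes Secret as environment variables via `envFrom`. The Deployment name and image below are hypothetical placeholders.

```yaml
# Hypothetical Deployment fragment: consumes the `app-secrets` Secret
# created by the ExternalSecret shown above as container env vars.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: app            # hypothetical workload name
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: app
  template:
    metadata:
      labels:
        app.kubernetes.io/name: app
    spec:
      containers:
        - name: app
          image: example.org/app:latest   # hypothetical image
          envFrom:
            - secretRef:
                name: app-secrets         # must match target.name in the ExternalSecret
```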
+ +## Usage + +**Stack Level**: Regional + +Use this in the catalog or use these variables to overwrite the catalog values. + +```yaml +components: + terraform: + eks/external-secrets-operator: + settings: + spacelift: + workspace_enabled: true + vars: + enabled: true + name: "external-secrets-operator" + helm_manifest_experiment_enabled: false + chart: "external-secrets" + chart_repository: "https://charts.external-secrets.io" + chart_version: "0.8.3" + kubernetes_namespace: "secrets" + create_namespace: true + timeout: 90 + wait: true + atomic: true + cleanup_on_fail: true + tags: + Team: sre + Service: external-secrets-operator + resources: + limits: + cpu: "100m" + memory: "300Mi" + requests: + cpu: "20m" + memory: "60Mi" + parameter_store_paths: + - app + - rds + # You can use `chart_values` to set any other chart options. Treat `chart_values` as the root of the doc. + # + # # For example + # --- + # chart_values: + # installCRDs: true + chart_values: {} + kms_aliases_allow_decrypt: [] + # - "alias/foo/bar" +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [helm](#requirement\_helm) | >= 2.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.7.1, != 2.21.0, != 2.21.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | +| [kubernetes](#provider\_kubernetes) | >= 2.7.1, != 2.21.0, != 2.21.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [external\_secrets\_operator](#module\_external\_secrets\_operator) | cloudposse/helm-release/aws | 0.10.1 | +| [external\_ssm\_secrets](#module\_external\_ssm\_secrets) | cloudposse/helm-release/aws | 0.10.1 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [kubernetes_namespace.default](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [aws_kms_alias.kms_aliases](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/kms_alias) | data source | +| [kubernetes_resources.crd](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/data-sources/resources) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [atomic](#input\_atomic) | If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used. | `bool` | `true` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [chart](#input\_chart) | Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended. | `string` | `"external-secrets"` | no | +| [chart\_description](#input\_chart\_description) | Set release description attribute (visible in the history). | `string` | `"External Secrets Operator is a Kubernetes operator that integrates external secret management systems including AWS SSM, Parameter Store, Hasicorp Vault, 1Password Secrets Automation, etc. It reads values from external vaults and injects values as a Kubernetes Secret"` | no | +| [chart\_repository](#input\_chart\_repository) | Repository URL where to locate the requested chart. | `string` | `"https://charts.external-secrets.io"` | no | +| [chart\_values](#input\_chart\_values) | Additional values to yamlencode as `helm_release` values. | `any` | `{}` | no | +| [chart\_version](#input\_chart\_version) | Specify the exact chart version to install. If this is not specified, the latest version is installed. | `string` | `"0.6.0-rc1"` | no | +| [cleanup\_on\_fail](#input\_cleanup\_on\_fail) | Allow deletion of new resources created in this upgrade when upgrade fails. | `bool` | `true` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [create\_namespace](#input\_create\_namespace) | Create the Kubernetes namespace if it does not yet exist | `bool` | `null` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can be seen in the plan | `bool` | `false` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [kms\_aliases\_allow\_decrypt](#input\_kms\_aliases\_allow\_decrypt) | A list of KMS aliases that the SecretStore is allowed to decrypt. | `list(string)` | `[]` | no | +| [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | +| [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | +| [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | +| [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | +| [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | +| [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | +| [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | +| [kubernetes\_namespace](#input\_kubernetes\_namespace) | The namespace to install the release into. | `string` | n/a | yes | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [parameter\_store\_paths](#input\_parameter\_store\_paths) | A list of path prefixes that the SecretStore is allowed to access via IAM. This should match the convention 'service' that Chamber uploads keys under. | `set(string)` |
[
"app"
]
| no | +| [rbac\_enabled](#input\_rbac\_enabled) | Service Account for pods. | `bool` | `true` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [resources](#input\_resources) | The cpu and memory of the deployment's limits and requests. |
object({
limits = object({
cpu = string
memory = string
})
requests = object({
cpu = string
memory = string
})
})
| n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [timeout](#input\_timeout) | Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds | `number` | `null` | no | +| [verify](#input\_verify) | Verify the package before installing it. Helm uses a provenance file to verify the integrity of the chart; this must be hosted alongside the chart | `bool` | `false` | no | +| [wait](#input\_wait) | Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`. | `bool` | `true` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [metadata](#output\_metadata) | Block status of the deployed release | + + + +## References + +- [Secrets Management Strategy](https://docs.cloudposse.com/layers/project/design-decisions/decide-on-secrets-management-strategy-for-terraform) +- https://external-secrets.io/v0.5.9/ +- https://external-secrets.io/v0.5.9/provider-aws-parameter-store/ diff --git a/modules/eks/external-secrets-operator/additional-iam-policy-statements.tf b/modules/eks/external-secrets-operator/additional-iam-policy-statements.tf new file mode 100644 index 000000000..5d9cd000d --- /dev/null +++ b/modules/eks/external-secrets-operator/additional-iam-policy-statements.tf @@ -0,0 +1,17 @@ +locals { + # If you have custom policy statements, override this declaration by creating + # a file called `additional-iam-policy-statements_override.tf`. + # Then add the custom policy statements to the overridable_additional_iam_policy_statements in that file. + overridable_additional_iam_policy_statements = [ + # { + # sid = "UseKMS" + # effect = "Allow" + # actions = [ + # "kms:Decrypt" + # ] + # resources = [ + # "*" + # ] + # } + ] +} diff --git a/modules/eks/external-secrets-operator/charts/external-ssm-secrets/.helmignore b/modules/eks/external-secrets-operator/charts/external-ssm-secrets/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/modules/eks/external-secrets-operator/charts/external-ssm-secrets/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/modules/eks/external-secrets-operator/charts/external-ssm-secrets/Chart.yaml b/modules/eks/external-secrets-operator/charts/external-ssm-secrets/Chart.yaml new file mode 100644 index 000000000..3725b354f --- /dev/null +++ b/modules/eks/external-secrets-operator/charts/external-ssm-secrets/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: external-ssm-secrets +description: This Chart handles deploying custom resource definitions needed to access SSM via external-secrets-operator + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. 
They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "0.1.0" diff --git a/modules/eks/external-secrets-operator/charts/external-ssm-secrets/templates/ssm-secret-store.yaml b/modules/eks/external-secrets-operator/charts/external-ssm-secrets/templates/ssm-secret-store.yaml new file mode 100644 index 000000000..a1482674b --- /dev/null +++ b/modules/eks/external-secrets-operator/charts/external-ssm-secrets/templates/ssm-secret-store.yaml @@ -0,0 +1,10 @@ +apiVersion: external-secrets.io/v1beta1 +kind: ClusterSecretStore +metadata: + name: "secret-store-parameter-store" +spec: + provider: + aws: + service: ParameterStore + region: {{ .Values.region }} + role: {{ .Values.role }} # role is created via helm-release; see `service_account_set_key_path` diff --git a/modules/eks/external-secrets-operator/context.tf b/modules/eks/external-secrets-operator/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/eks/external-secrets-operator/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/eks/external-secrets-operator/examples/app-secrets.yaml b/modules/eks/external-secrets-operator/examples/app-secrets.yaml new file mode 100644 index 000000000..ea4928d7a --- /dev/null +++ b/modules/eks/external-secrets-operator/examples/app-secrets.yaml @@ -0,0 +1,24 @@ +# example to fetch all secrets underneath the `/app/` prefix (service). +# Keys are rewritten within the K8S Secret to be predictable and omit the +# prefix. 
+ +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: app-secrets +spec: + refreshInterval: 30s + secretStoreRef: + name: "secret-store-parameter-store" # Must match name of the Cluster Secret Store created by this component + kind: ClusterSecretStore + target: + creationPolicy: Owner + name: app-secrets + dataFrom: + - find: + name: + regexp: "^/app/" # Match the path prefix of your service + rewrite: + - regexp: + source: "/app/(.*)" # Remove the path prefix of your service from the name before creating the envars + target: "$1" diff --git a/modules/eks/external-secrets-operator/examples/external-secrets.yaml b/modules/eks/external-secrets-operator/examples/external-secrets.yaml new file mode 100644 index 000000000..b88414ef2 --- /dev/null +++ b/modules/eks/external-secrets-operator/examples/external-secrets.yaml @@ -0,0 +1,18 @@ +# example to fetch a single secret from our Parameter Store `SecretStore` + +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: single-secret +spec: + refreshInterval: 30s + secretStoreRef: + name: "secret-store-parameter-store" # Must match name of the Cluster Secret Store created by this component + kind: ClusterSecretStore + target: + creationPolicy: Owner + name: single-secret + data: + - secretKey: good_secret + remoteRef: + key: /app/good_secret diff --git a/modules/eks/external-secrets-operator/helm-variables.tf b/modules/eks/external-secrets-operator/helm-variables.tf new file mode 100644 index 000000000..a0b007642 --- /dev/null +++ b/modules/eks/external-secrets-operator/helm-variables.tf @@ -0,0 +1,71 @@ +variable "kubernetes_namespace" { + type = string + description = "The namespace to install the release into." +} + +variable "chart_description" { + type = string + description = "Set release description attribute (visible in the history)." + default = "External Secrets Operator is a Kubernetes operator that integrates external secret management systems including AWS SSM, Parameter Store, Hasicorp Vault, 1Password Secrets Automation, etc. It reads values from external vaults and injects values as a Kubernetes Secret" +} + +variable "chart_repository" { + type = string + description = "Repository URL where to locate the requested chart." + default = "https://charts.external-secrets.io" +} + +variable "chart" { + type = string + description = "Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended." + default = "external-secrets" +} + +variable "chart_version" { + type = string + description = "Specify the exact chart version to install. If this is not specified, the latest version is installed." + default = "0.6.0-rc1" + # using RC to address this bug https://github.com/external-secrets/external-secrets/issues/1511 +} + +variable "chart_values" { + type = any + description = "Additional values to yamlencode as `helm_release` values." + default = {} +} + +variable "create_namespace" { + type = bool + description = "Create the Kubernetes namespace if it does not yet exist" + default = null +} + +variable "verify" { + type = bool + description = "Verify the package before installing it. 
Helm uses a provenance file to verify the integrity of the chart; this must be hosted alongside the chart" + default = false +} + +variable "wait" { + type = bool + description = "Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`." + default = true +} + +variable "atomic" { + type = bool + description = "If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used." + default = true +} + +variable "cleanup_on_fail" { + type = bool + description = "Allow deletion of new resources created in this upgrade when upgrade fails." + default = true +} + +variable "timeout" { + type = number + description = "Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds" + default = null +} diff --git a/modules/eks/external-secrets-operator/main.tf b/modules/eks/external-secrets-operator/main.tf new file mode 100644 index 000000000..bd79a0400 --- /dev/null +++ b/modules/eks/external-secrets-operator/main.tf @@ -0,0 +1,158 @@ +locals { + enabled = module.this.enabled + account_name = lookup(module.this.descriptors, "account_name", module.this.stage) + account = module.account_map.outputs.full_account_map[local.account_name] +} + +resource "kubernetes_namespace" "default" { + count = local.enabled && var.create_namespace ? 1 : 0 + + metadata { + name = var.kubernetes_namespace + + labels = module.this.tags + } +} + +# CRDs are automatically installed by "cloudposse/helm-release/aws" +# https://external-secrets.io/v0.5.9/guides-getting-started/ +module "external_secrets_operator" { + source = "cloudposse/helm-release/aws" + version = "0.10.1" + + name = "" # avoid redundant release name in IAM role: ...-ekc-cluster-external-secrets-operator-external-secrets-operator@secrets + description = var.chart_description + + repository = var.chart_repository + chart = var.chart + chart_version = var.chart_version + kubernetes_namespace = join("", kubernetes_namespace.default[*].id) + create_namespace = false + wait = var.wait + atomic = var.atomic + cleanup_on_fail = var.cleanup_on_fail + timeout = var.timeout + verify = var.verify + + eks_cluster_oidc_issuer_url = replace(module.eks.outputs.eks_cluster_identity_oidc_issuer, "https://", "") + + service_account_name = module.this.name + service_account_namespace = var.kubernetes_namespace + + iam_role_enabled = true + iam_policy = [{ + statements = concat([ + { + sid = "ReadParameterStore" + effect = "Allow" + actions = [ + "ssm:GetParameter*" + ] + resources = concat( + [for parameter_store_path in var.parameter_store_paths : ( + "arn:aws:ssm:${var.region}:${local.account}:parameter/${parameter_store_path}/*" + )], + [for parameter_store_path in var.parameter_store_paths : ( + "arn:aws:ssm:${var.region}:${local.account}:parameter/${parameter_store_path}" + )]) + }, + { + sid = "DescribeParameters" + effect = "Allow" + actions = [ + "ssm:DescribeParameter*" + ] + resources = [ + "arn:aws:ssm:${var.region}:${local.account}:*" + ] + }], + local.overridable_additional_iam_policy_statements, + length(var.kms_aliases_allow_decrypt) > 0 ? 
[ + { + sid = "DecryptKMS" + effect = "Allow" + actions = [ + "kms:Decrypt" + ] + resources = local.kms_aliases_target_arns + } + ] : [] + ) + }] + + values = compact([ + yamlencode({ + serviceAccount = { + name = module.this.name + } + rbac = { + create = var.rbac_enabled + } + }), + # additional values + yamlencode(var.chart_values) + ]) + + context = module.this.context +} + +data "kubernetes_resources" "crd" { + api_version = "apiextensions.k8s.io/v1" + kind = "CustomResourceDefinition" + field_selector = "metadata.name==externalsecrets.external-secrets.io" +} + +module "external_ssm_secrets" { + source = "cloudposse/helm-release/aws" + version = "0.10.1" + + enabled = local.enabled && length(data.kubernetes_resources.crd.objects) > 0 + + name = "ssm" # distinguish from external_secrets_operator + description = "This Chart uses creates a SecretStore and ExternalSecret to pull variables (under a given path) from AWS SSM Parameter Store into a Kubernetes secret." + + chart = "${path.module}/charts/external-ssm-secrets" + kubernetes_namespace = join("", kubernetes_namespace.default[*].id) + create_namespace = false + wait = var.wait + atomic = var.atomic + cleanup_on_fail = var.cleanup_on_fail + timeout = var.timeout + + eks_cluster_oidc_issuer_url = replace(module.eks.outputs.eks_cluster_identity_oidc_issuer, "https://", "") + + service_account_name = module.this.name + service_account_namespace = var.kubernetes_namespace + service_account_role_arn_annotation_enabled = true + service_account_set_key_path = "role" + + values = compact([ + yamlencode({ + region = var.region, + parameter_store_paths = var.parameter_store_paths + resources = var.resources + serviceAccount = { + name = module.this.name + } + rbac = { + create = var.rbac_enabled + } + }) + ]) + + context = module.this.context + + depends_on = [ + # CRDs from external_secrets_operator need to be installed first + module.external_secrets_operator, + ] +} + +data "aws_kms_alias" "kms_aliases" { + for_each = { for i, v in var.kms_aliases_allow_decrypt : v => v } + name = each.value +} + +locals { + kms_aliases_target_arns = [for k, v in data.aws_kms_alias.kms_aliases : data.aws_kms_alias.kms_aliases[k].target_key_arn] +} diff --git a/modules/eks/external-secrets-operator/outputs.tf b/modules/eks/external-secrets-operator/outputs.tf new file mode 100644 index 000000000..273251dd7 --- /dev/null +++ b/modules/eks/external-secrets-operator/outputs.tf @@ -0,0 +1,4 @@ +output "metadata" { + value = try(one(module.external_secrets_operator.metadata), null) + description = "Block status of the deployed release" +} diff --git a/modules/eks/external-secrets-operator/provider-helm.tf b/modules/eks/external-secrets-operator/provider-helm.tf new file mode 100644 index 000000000..91cc7f6d4 --- /dev/null +++ b/modules/eks/external-secrets-operator/provider-helm.tf @@ -0,0 +1,201 @@ +################## +# +# This file is a drop-in to provide a helm provider. +# +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# +# All the following variables are just about configuring the Kubernetes provider +# to be able to modify EKS cluster. The reason there are so many options is +# because at various times, each one of them has had problems, so we give you a choice. 
+# +# The reason there are so many "enabled" inputs rather than automatically +# detecting whether or not they are enabled based on the value of the input +# is that any logic based on input values requires the values to be known during +# the "plan" phase of Terraform, and often they are not, which causes problems. +# +variable "kubeconfig_file_enabled" { + type = bool + default = false + description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false +} + +variable "kubeconfig_file" { + type = string + default = "" + description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false +} + +variable "kubeconfig_context" { + type = string + default = "" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false +} + +variable "kube_data_auth_enabled" { + type = bool + default = false + description = <<-EOT + If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. + EOT + nullable = false +} + +variable "kube_exec_auth_enabled" { + type = bool + default = true + description = <<-EOT + If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. + EOT + nullable = false +} + +variable "kube_exec_auth_role_arn" { + type = string + default = "" + description = "The role ARN for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_role_arn_enabled" { + type = bool + default = true + description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false +} + +variable "kube_exec_auth_aws_profile" { + type = string + default = "" + description = "The AWS config profile for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_aws_profile_enabled" { + type = bool + default = false + description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false +} + +variable "kubeconfig_exec_auth_api_version" { + type = string + default = "client.authentication.k8s.io/v1beta1" + description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false +} + +variable "helm_manifest_experiment_enabled" { + type = bool + default = false + description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false +} + +locals { + kubeconfig_file_enabled = var.kubeconfig_file_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? 
format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + + # Eventually we might try to get this from an environment variable + kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version + + exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? [ + "--profile", var.kube_exec_auth_aws_profile + ] : [] + + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) + exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ + "--role-arn", local.kube_exec_auth_role_arn + ] : [] + + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") +} + +data "aws_eks_cluster_auth" "eks" { + count = local.kube_data_auth_enabled ? 1 : 0 + name = local.eks_cluster_id +} + +provider "helm" { + kubernetes { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } + } + experiments { + manifest = var.helm_manifest_experiment_enabled && module.this.enabled + } +} + +provider "kubernetes" { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? 
["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } +} diff --git a/modules/eks/external-secrets-operator/providers.tf b/modules/eks/external-secrets-operator/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/eks/external-secrets-operator/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/eks/external-secrets-operator/remote-state.tf b/modules/eks/external-secrets-operator/remote-state.tf new file mode 100644 index 000000000..7863c9586 --- /dev/null +++ b/modules/eks/external-secrets-operator/remote-state.tf @@ -0,0 +1,20 @@ +module "eks" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.eks_component_name + + context = module.this.context +} + +module "account_map" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "account-map" + tenant = module.iam_roles.global_tenant_name + environment = module.iam_roles.global_environment_name + stage = module.iam_roles.global_stage_name + + context = module.this.context +} diff --git a/modules/eks/external-secrets-operator/variables.tf b/modules/eks/external-secrets-operator/variables.tf new file mode 100644 index 000000000..48b22c69e --- /dev/null +++ b/modules/eks/external-secrets-operator/variables.tf @@ -0,0 +1,42 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "rbac_enabled" { + type = bool + default = true + description = "Service Account for pods." +} + +variable "eks_component_name" { + type = string + description = "The name of the eks component" + default = "eks/cluster" +} + +variable "parameter_store_paths" { + type = set(string) + description = "A list of path prefixes that the SecretStore is allowed to access via IAM. This should match the convention 'service' that Chamber uploads keys under." + default = ["app"] +} + +variable "resources" { + type = object({ + limits = object({ + cpu = string + memory = string + }) + requests = object({ + cpu = string + memory = string + }) + }) + description = "The cpu and memory of the deployment's limits and requests." +} + +variable "kms_aliases_allow_decrypt" { + type = list(string) + description = "A list of KMS aliases that the SecretStore is allowed to decrypt." 
+ default = [] +} diff --git a/modules/eks/external-secrets-operator/versions.tf b/modules/eks/external-secrets-operator/versions.tf new file mode 100644 index 000000000..46584b569 --- /dev/null +++ b/modules/eks/external-secrets-operator/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.7.1, != 2.21.0, != 2.21.0" + } + } +} diff --git a/modules/eks/github-actions-runner/CHANGELOG.md b/modules/eks/github-actions-runner/CHANGELOG.md new file mode 100644 index 000000000..d086975da --- /dev/null +++ b/modules/eks/github-actions-runner/CHANGELOG.md @@ -0,0 +1,167 @@ +## Initial Release + +This release has been tested and used in production, but testing has not covered all available features. Please use with +caution and report any issues you encounter. + +### Migration from `actions-runner-controller` + +GitHub has released its own official self-hosted GitHub Actions Runner support, replacing the +`actions-runner-controller` implementation developed by Summerwind. (See the +[announcement from GitHub](https://github.com/actions/actions-runner-controller/discussions/2072).) Accordingly, this +component is a replacement for the +[`actions-runner-controller`](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/eks/actions-runner-controller) +component. Although there are different defaults for some of the configuration options, if you are already using +`actions-runner-controller` you should be able to reuse the GitHub app or PAT and image pull secret you are already +using, making migration relatively straightforward. + +We recommend deploying this component into a separate namespace (or namespaces) than `actions-runner-controller` and get +the new runners sets running before you remove the old ones. You can then migrate your workflows to use the new runners +sets and have zero downtime. + +Major differences: + +- The official GitHub runners deployed are different from the GitHub hosted runners and the Summerwind self-hosted + runners in that + [they have very few tools installed](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/about-actions-runner-controller#about-the-runner-container-image). + You will need to install any tools you need in your workflows, either as part of your workflow (recommended) or by + maintaining a + [custom runner image](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/about-actions-runner-controller#creating-your-own-runner-image), + or by running such steps in a + [separate container](https://docs.github.com/en/actions/using-jobs/running-jobs-in-a-container) that has the tools + pre-installed. Many tools have publicly available actions to install them, such as `actions/setup-node` to install + NodeJS or `dcarbone/install-jq-action` to install `jq`. You can also install packages using + `awalsh128/cache-apt-pkgs-action`, which has the advantage of being able to skip the installation if the package is + already installed, so you can more efficiently run the same workflow on GitHub hosted as well as self-hosted runners. 
+- Self-hosted runners, such as those deployed with the `actions-runner-controller` component, are targeted by a set of + labels indicated by a workflow's `runs-on` array, of which the first must be "self-hosted". Runner Sets, such as are + deployed with this component, are targeted by a single label, which is the name of the Runner Set. This means that you + will need to update your workflows to target the new Runner Set label. See + [here](https://github.com/actions/actions-runner-controller/discussions/2921#discussioncomment-7501051) for the + reasoning behind GitHub's decision to use a single label instead of a set. +- The `actions-runner-controller` component uses the published Helm chart for the controller, but there is none for the + runners, so it includes a custom Helm chart for them. However, for Runner Sets, GitHub has published 2 charts, one for + the controller and one for the runners (runner sets). This means that this component requires configuration (e.g. + version numbers) of 2 charts, although both should be kept at the same version. +- The `actions-runner-controller` component has a `resources/values.yaml` file that provided defaults for the controller + Helm chart. This component does not have files like that by default, but supports a `resources/values-controller.yaml` + file for the "gha-runner-scale-set-controller" chart and a `resources/values-runner.yaml` file for the + "gha-runner-scale-set" chart. +- The default values for the SSM paths for the GitHub auth secret and the imagePullSecret have changed. Specify the old + values explicitly to keep using the same secrets. +- The `actions-runner-controller` component creates an IAM Role (IRSA) for the runners to use. This component does not + create an IRSA, because the chart does not support using one while in "dind" mode. Use GitHub OIDC authentication + inside your workflows instead. +- The Runner Sets deployed by this component use a different autoscaling mechanism, so most of the + `actions-runner-controller` configuration options related to autoscaling are not applicable. +- For the same reason, this component does not deploy a webhook listener or Ingress and does not require configuration + of a GitHub webhook. +- The `actions-runner-controller` component has an input named `existing_kubernetes_secret_name`. The equivalent input + for this component is `github_kubernetes_secret_name`, in order to clearly distinguish it from the + `image_pull_kubernetes_secret_name` input. + +### Translating configuration from `actions-runner-controller` + +Here is an example configuration for the `github-actions-runner` controller, with comments indicating where in the +`actions-runner-controller` configuration the corresponding configuration option can be copied from. + +```yaml +components: + terraform: + eks/github-actions-runner: + vars: + # This first set of values you can just copy from here. + # However, if you had customized the standard Helm configuration + # (such things as `cleanup_on_fail`, `atomic`, or `timeout`), you + # now need to do that per chart under the `charts` input. + enabled: true + name: "gha-runner-controller" + charts: + controller: + # As of the time of the creation of this component, 0.7.0 is the latest version + # of the chart. If you use a newer version, check for breaking changes + # and any updates to this component that may be required. 
+ # Find the latest version at https://github.com/actions/actions-runner-controller/blob/master/charts/gha-runner-scale-set-controller/Chart.yaml#L18 + chart_version: "0.7.0" + runner_sets: + # We expect that the runner set chart will always be at the same version as the controller chart, + # but the charts are still in pre-release so that may change. + # Find the latest version at https://github.com/actions/actions-runner-controller/blob/master/charts/gha-runner-scale-set/Chart.yaml#L18 + chart_version: "0.7.0" + controller: + # These inputs from `actions-runner-controller` are now parts of the controller configuration input + kubernetes_namespace: "gha-runner-controller" + create_namespace: true + replicas: 1 # From `actions-runner-controller` file `resources/values.yaml`, value `replicaCount` + # resources from var.resources + + # These values can be copied directly from the `actions-runner-controller` configuration + ssm_github_secret_path: "/github_runners/controller_github_app_secret" + github_app_id: "250828" + github_app_installation_id: "30395627" + + # These values require some converstion from the `actions-runner-controller` configuration + # Set `create_github_kubernetes_secret` to `true` if `existing_kubernetes_secret_name` was not set, `false` otherwise. + create_github_kubernetes_secret: true + # If `existing_kubernetes_secret_name` was set, copy the setting to `github_kubernetes_secret_name` here. + # github_kubernetes_secret_name: + + # To configure imagePullSecrets: + # Set `image_pull_secret_enabled` to the value of `docker_config_json_enabled` in `actions-runner-controller` configuration. + image_pull_secret_enabled: true + # Set `ssm_image_pull_secret_path` to the value of `ssm_docker_config_json_path` in `actions-runner-controller` configuration. + ssm_image_pull_secret_path: "/github_runners/docker/config-json" + + # To configure the runner sets, there is still a map of `runners`, but most + # of the configuration options from `actions-runner-controller` are not applicable. + # Most of the applicable configuration options are the same as for `actions-runner-controller`. + runners: + # The name of the runner set is the key of the map. The name is now the only label + # that is used to target the runner set. + self-hosted-default: + # Namespace is new. The `actions-runner-controller` always deployed the runners to the same namespace as the controller. + # Runner sets support deploying the runners in a namespace other than the controller, + # and it is recommended to do so. If you do not set kubernetes_namespace, the runners will be deployed + # in the same namespace as the controller. + kubernetes_namespace: "gha-runner-private" + # Set create_namespace to false if the namespace has been created by another component. + create_namespace: true + + # `actions-runner-controller` had a `dind_enabled` input that was switch between "kubernetes" and "dind" mode. + # This component has a `mode` input that can be set to "kubernetes" or "dind". + mode: "dind" + + # Where the `actions-runner-controller` configuration had `type` and `scope`, + # the runner set has `github_url`. For organization scope runners, use https://github.com/myorg + # (or, if you are using Enterprise GitHub, your GitHub Enterprise URL). + # For repo runners, use the repo URL, e.g. 
https://github.com/myorg/myrepo + github_url: https://github.com/cloudposse + + # These configuration options are the same as for `actions-runner-controller` + # group: "default" + # node_selector: + # kubernetes.io/os: "linux" + # kubernetes.io/arch: "arm64" + # tolerations: + # - key: "kubernetes.io/arch" + # operator: "Equal" + # value: "arm64" + # effect: "NoSchedule" + # If min_replicas > 0 and you also have do-not-evict: "true" set + # then the idle/waiting runner will keep Karpenter from deprovisioning the node + # until a job runs and the runner is deleted. So we do not set it by default. + # pod_annotations: + # karpenter.sh/do-not-evict: "true" + min_replicas: 1 + max_replicas: 12 + resources: + limits: + cpu: 1100m + memory: 1024Mi + ephemeral-storage: 5Gi + requests: + cpu: 500m + memory: 256Mi + ephemeral-storage: 1Gi + # The rest of the `actions-runner-controller` configuration is not applicable. + # This includes `labels` as well as anything to do with autoscaling. +``` diff --git a/modules/eks/github-actions-runner/README.md b/modules/eks/github-actions-runner/README.md new file mode 100644 index 000000000..0c511f62d --- /dev/null +++ b/modules/eks/github-actions-runner/README.md @@ -0,0 +1,473 @@ +--- +tags: + - component/eks/github-actions-runner + - layer/github + - provider/aws + - provider/helm +--- + +# Component: `eks/github-actions-runner` + +This component deploys self-hosted GitHub Actions Runners and a +[Controller](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller#introduction) +on an EKS cluster, using +"[runner scale sets](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/deploying-runner-scale-sets-with-actions-runner-controller#runner-scale-set)". + +This solution is supported by GitHub and supersedes the +[actions-runner-controller](https://github.com/actions/actions-runner-controller/blob/master/docs/about-arc.md) +developed by Summerwind and deployed by Cloud Posse's +[actions-runner-controller](https://docs.cloudposse.com/components/library/aws/eks/actions-runner-controller/) +component. + +### Current limitations + +The runner image used by Runner Sets contains +[no more packages than are necessary](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/about-actions-runner-controller#about-the-runner-container-image) +to run the runner. This is in contrast to the Summerwind implementation, which contains some commonly needed packages +like `build-essential`, `curl`, `wget`, `git`, and `jq`, and the GitHub hosted images which contain a robust set of +tools. (This is a limitation of the official Runner Sets implementation, not this component per se.) You will need to +install any tools you need in your workflows, either as part of your workflow (recommended), by maintaining a +[custom runner image](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/about-actions-runner-controller#creating-your-own-runner-image), +or by running such steps in a +[separate container](https://docs.github.com/en/actions/using-jobs/running-jobs-in-a-container) that has the tools +pre-installed. Many tools have publicly available actions to install them, such as `actions/setup-node` to install +NodeJS or `dcarbone/install-jq-action` to install `jq`. 
You can also install packages using +`awalsh128/cache-apt-pkgs-action`, which has the advantage of being able to skip the installation if the package is +already installed, so you can more efficiently run the same workflow on GitHub hosted as well as self-hosted runners. + +:::info + +There are (as of this writing) open feature requests to add some commonly needed packages to the official Runner Sets +runner image. You can upvote these requests +[here](https://github.com/actions/actions-runner-controller/discussions/3168) and +[here](https://github.com/orgs/community/discussions/80868) to help get them implemented. + +::: + +In the current version of this component, only "dind" (Docker in Docker) mode has been tested. Support for "kubernetes" +mode is provided, but has not been validated. + +Many elements in the Controller chart are not directly configurable by named inputs. To configure them, you can use the +`controller.chart_values` input or create a `resources/values-controller.yaml` file in the component to supply values. + +Almost all the features of the Runner Scale Set chart are configurable by named inputs. The exceptions are: + +- There is no specific input for specifying an outbound HTTP proxy. +- There is no specific input for supplying a + [custom certificate authority (CA) certificate](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/deploying-runner-scale-sets-with-actions-runner-controller#custom-tls-certificates) + to use when connecting to GitHub Enterprise Server. + +You can specify these values by creating a `resources/values-runner.yaml` file in the component and setting values as +shown by the default Helm +[values.yaml](https://github.com/actions/actions-runner-controller/blob/master/charts/gha-runner-scale-set/values.yaml), +and they will be applied to all runners. + +Currently, this component has some additional limitations. In particular: + +- The controller and all runners and listeners share the Image Pull Secrets. You cannot use different ones for different + runners. +- All the runners use the same GitHub secret (app or PAT). Using a GitHub app is preferred anyway, and the single GitHub + app serves the entire organization. +- Only one controller is supported per cluster, though it can have multiple replicas. + +These limitations could be addressed if there is demand. Contact +[Cloud Posse Professional Services](https://cloudposse.com/professional-services/) if you would be interested in +sponsoring the development of any of these features. + +### Ephemeral work storage + +The runners are configured to use ephemeral storage for workspaces, but the details and defaults can be a bit confusing. + +When running in "dind" ("Docker in Docker") mode, the default is to use `emptyDir`, which means space on the `kubelet` +base directory, which is usually the root disk. You can manage the amount of storage allowed to be used with +`ephemeral_storage` requests and limits, or you can just let it use whatever free space there is on the root disk. + +When running in `kubernetes` mode, the only supported local disk storage is an ephemeral `PersistentVolumeClaim`, which +causes a separate disk to be allocated for the runner pod. This disk is ephemeral, and will be deleted when the runner +pod is deleted. When combined with the recommended ephemeral runner configuration, this means that a new disk will be +created for each job, and deleted when the job is complete. 
That is a lot of overhead and will slow things down +somewhat. + +The size of the attached PersistentVolume is controlled by `ephemeral_pvc_storage` (a Kubernetes size string like "1G") +and the kind of storage is controlled by `ephemeral_pvc_storage_class` (which can be omitted to use the cluster default +storage class). + +This mode is also optionally available when using `dind`. To enable it, set `ephemeral_pvc_storage` to the desired size. +Leave `ephemeral_pvc_storage` at the default value of `null` to use `emptyDir` storage (recommended). + +Beware that using a PVC may significantly increase the startup of the runner. If you are using a PVC, you may want to +keep idle runners available so that jobs can be started without waiting for a new runner to start. + +## Usage + +**Stack Level**: Regional + +Once the catalog file is created, the file can be imported as follows. + +```yaml +import: + - catalog/eks/github-actions-runner + ... +``` + +The default catalog values `e.g. stacks/catalog/eks/github-actions-runner.yaml` + +```yaml +components: + terraform: + eks/github-actions-runner: + vars: + enabled: true + ssm_region: "us-east-2" + name: "gha-runner-controller" + charts: + controller: + chart_version: "0.7.0" + runner_sets: + chart_version: "0.7.0" + controller: + kubernetes_namespace: "gha-runner-controller" + create_namespace: true + + create_github_kubernetes_secret: true + ssm_github_secret_path: "/github-action-runners/github-auth-secret" + github_app_id: "123456" + github_app_installation_id: "12345678" + runners: + config-default: &runner-default + enabled: false + github_url: https://github.com/cloudposse + # group: "default" + # kubernetes_namespace: "gha-runner-private" + create_namespace: true + # If min_replicas > 0 and you also have do-not-evict: "true" set + # then the idle/waiting runner will keep Karpenter from deprovisioning the node + # until a job runs and the runner is deleted. + # override by setting `pod_annotations: {}` + pod_annotations: + karpenter.sh/do-not-evict: "true" + min_replicas: 0 + max_replicas: 8 + resources: + limits: + cpu: 1100m + memory: 1024Mi + ephemeral-storage: 5Gi + requests: + cpu: 500m + memory: 256Mi + ephemeral-storage: 1Gi + self-hosted-default: + <<: *runner-default + enabled: true + kubernetes_namespace: "gha-runner-private" + # If min_replicas > 0 and you also have do-not-evict: "true" set + # then the idle/waiting runner will keep Karpenter from deprovisioning the node + # until a job runs and the runner is deleted. So we override the default. + pod_annotations: {} + min_replicas: 1 + max_replicas: 12 + resources: + limits: + cpu: 1100m + memory: 1024Mi + ephemeral-storage: 5Gi + requests: + cpu: 500m + memory: 256Mi + ephemeral-storage: 1Gi + self-hosted-large: + <<: *runner-default + enabled: true + resources: + limits: + cpu: 6000m + memory: 7680Mi + ephemeral-storage: 90G + requests: + cpu: 4000m + memory: 7680Mi + ephemeral-storage: 40G +``` + +### Authentication and Secrets + +The GitHub Action Runners need to authenticate to GitHub in order to do such things as register runners and pickup jobs. 
+You can authenticate using either a
+[GitHub App](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/authenticating-to-the-github-api#authenticating-arc-with-a-github-app)
+or a
+[Personal Access Token (classic)](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/authenticating-to-the-github-api#authenticating-arc-with-a-personal-access-token-classic).
+The preferred way to authenticate is by _creating_ and _installing_ a GitHub App. This is the recommended approach as it
+allows for much more restricted access than using a Personal Access Token (classic), and the Action Runners do not
+currently support using a fine-grained Personal Access Token.
+
+#### Side note about SSM and Regions
+
+This component supports using AWS SSM to store and retrieve secrets. SSM parameters are regional, so if you want to
+deploy to multiple regions you have 2 choices:
+
+1. Create the secrets in each region. This is the most robust approach, but requires you to create the secrets in each
+   region and keep them in sync.
+2. Create the secrets in one region and use the `ssm_region` input to specify the region where they are stored. This is
+   the easiest approach, but does add some obstacles to managing deployments during a region outage. If the region where
+   the secrets are stored goes down, there will be no impact on runners in other regions, but you will not be able to
+   deploy new runners or modify existing runners until the SSM region is restored or until you set up SSM parameters in
+   a new region.
+
+Alternatively, you can create Kubernetes secrets outside of this component (perhaps using
+[SOPS](https://github.com/getsops/sops)) and reference them by name. We describe here how to save the secrets to SSM,
+but you can save the secrets wherever and however you want to, as long as you deploy them as a Kubernetes secret the
+runners can reference. If you store them in SSM, this component will take care of the rest, but the standard Terraform
+caveat applies: any secrets referenced by Terraform will be stored unencrypted in the Terraform state file.
+
+#### Creating and Using a GitHub App
+
+Follow the instructions
+[here](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/authenticating-to-the-github-api#authenticating-arc-with-a-github-app)
+to create and install a GitHub App for the runners to use for authentication.
+
+At the App creation stage, you will be asked to generate a private key. This is the private key that will be used to
+authenticate the Action Runner. Download the file and store the contents in SSM using the following command, adjusting
+the profile, region, and file name. The profile should be the `terraform` role in the account to which you are deploying
+the runner controller. The region should be the region where you are deploying the primary runner controller. If you are
+deploying runners to multiple regions, they can all reference the same SSM parameter by using the `ssm_region` input to
+specify the region where they are stored. The file name (argument to `cat`) should be the name of the private key file
+you downloaded.
+ +``` +# Adjust profile name and region to suit your environment, use file name you chose for key +AWS_PROFILE=acme-core-gbl-auto-terraform AWS_REGION=us-west-2 chamber write github-action-runners github-auth-secret -- "$(cat APP_NAME.DATE.private-key.pem)" +``` + +You can verify the file was correctly written to SSM by matching the private key fingerprint reported by GitHub with: + +``` +AWS_PROFILE=acme-core-gbl-auto-terraform AWS_REGION=us-west-2 chamber read -q github-action-runners github-auth-secret | openssl rsa -in - -pubout -outform DER | openssl sha256 -binary | openssl base64 +``` + +At this stage, record the Application ID and the private key fingerprint in your secrets manager (e.g. 1Password). You +may want to record the private key as well, or you may consider it sufficient to have it in SSM. You will need the +Application ID to configure the runner controller, and want the fingerprint to verify the private key. (You can see the +fingerprint in the GitHub App settings, under "Private keys".) + +Proceed to install the GitHub App in the organization or repository you want to use the runner controller for, and +record the Installation ID (the final numeric part of the URL, as explained in the instructions linked above) in your +secrets manager. You will need the Installation ID to configure the runner controller. + +In your stack configuration, set the following variables, making sure to quote the values so they are treated as +strings, not numbers. + +``` +github_app_id: "12345" +github_app_installation_id: "12345" +``` + +#### OR (obsolete): Creating and Using a Personal Access Token (classic) + +Though not recommended, you can use a Personal Access Token (classic) to authenticate the runners. To do so, create a +PAT (classic) as described in the +[GitHub Documentation](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/authenticating-to-the-github-api#authenticating-arc-with-a-personal-access-token-classic). +Save this to the value specified by `ssm_github_token_path` using the following command, adjusting the AWS profile and +region as explained above: + +``` +AWS_PROFILE=acme-core-gbl-auto-terraform AWS_REGION=us-west-2 chamber write github-action-runners github-auth-secret -- "" +``` + +### Using Runner Groups + +GitHub supports grouping runners into distinct +[Runner Groups](https://docs.github.com/en/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups), +which allow you to have different access controls for different runners. Read the linked documentation about creating +and configuring Runner Groups, which you must do through the GitHub Web UI. If you choose to create Runner Groups, you +can assign one or more Runner Sets (from the `runners` map) to groups (only one group per runner set, but multiple sets +can be in the same group) by including `group: ` in the runner configuration. We recommend including +it immediately after `github_url`. + +### Interaction with Karpenter or other EKS autoscaling solutions + +Kubernetes cluster autoscaling solutions generally expect that a Pod runs a service that can be terminated on one Node +and restarted on another with only a short duration needed to finish processing any in-flight requests. When the cluster +is resized, the cluster autoscaler will do just that. However, GitHub Action Runner Jobs do not fit this model. If a Pod +is terminated in the middle of a job, the job is lost. 
The likelihood of this happening is increased by the fact that +the Action Runner Controller Autoscaler is expanding and contracting the size of the Runner Pool on a regular basis, +causing the cluster autoscaler to more frequently want to scale up or scale down the EKS cluster, and, consequently, to +move Pods around. + +To handle these kinds of situations, Karpenter respects an annotation on the Pod: + +```yaml +spec: + template: + metadata: + annotations: + karpenter.sh/do-not-evict: "true" +``` + +When you set this annotation on the Pod, Karpenter will not voluntarily evict it. This means that the Pod will stay on +the Node it is on, and the Node it is on will not be considered for deprovisioning (scale down). This is good because it +means that the Pod will not be terminated in the middle of a job. However, it also means that the Node the Pod is on +will remain running until the Pod is terminated, even if the node is underutilized and Karpenter would like to get rid +of it. + +Since the Runner Pods terminate at the end of the job, this is not a problem for the Pods actually running jobs. +However, if you have set `minReplicas > 0`, then you have some Pods that are just idling, waiting for jobs to be +assigned to them. These Pods are exactly the kind of Pods you want terminated and moved when the cluster is +underutilized. Therefore, when you set `minReplicas > 0`, you should **NOT** set `karpenter.sh/do-not-evict: "true"` on +the Pod. + +### Updating CRDs + +When updating the chart or application version of `gha-runner-scale-set-controller`, it is possible you will need to +install new CRDs. Such a requirement should be indicated in the `gha-runner-scale-set-controller` release notes and may +require some adjustment to this component. + +This component uses `helm` to manage the deployment, and `helm` will not auto-update CRDs. If new CRDs are needed, +follow the instructions in the release notes for the Helm chart or `gha-runner-scale-set-controller` itself. + +### Useful Reference + +- Runner Scale Set Controller's Helm chart + [values.yaml](https://github.com/actions/actions-runner-controller/blob/master/charts/gha-runner-scale-set-controller/values.yaml) +- Runner Scale Set's Helm chart + [values.yaml](https://github.com/actions/actions-runner-controller/blob/master/charts/gha-runner-scale-set/values.yaml) +- Runner Scale Set's + [Docker image](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/about-actions-runner-controller#about-the-runner-container-image) + and + [how to create your own](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/about-actions-runner-controller#creating-your-own-runner-image) + +When reviewing documentation, code, issues, etc. for self-hosted GitHub action runners or the Actions Runner Controller +(ARC), keep in mind that there are 2 implementations going by that name. The original implementation, which is now +deprecated, uses the `actions.summerwind.dev` API group, and is at times called the Summerwind or Legacy implementation. +It is primarily described by documentation in the +[actions/actions-runner-controller](https://github.com/actions/actions-runner-controller) GitHub repository itself. + +The new implementation, which is the one this component uses, uses the `actions.github.com` API group, and is at times +called the GitHub implementation or "Runner Scale Sets" implementation. 
The new implementation is described in the +official +[GitHub documentation](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/about-actions-runner-controller). + +Feature requests about the new implementation are officially directed to the +[Actions category of GitHub community discussion](https://github.com/orgs/community/discussions/categories/actions). +However, Q&A and community support is directed to the `actions/actions-runner-controller` repo's +[Discussion section](https://github.com/actions/actions-runner-controller/discussions), though beware that discussions +about the old implementation are mixed in with discussions about the new implementation. + +Bug reports for the new implementation are still filed under the `actions/actions-runner-controller` repo's +[Issues](https://github.com/actions/actions-runner-controller/issues) tab, though again, these are mixed in with bug +reports for the old implementation. Look for the `gha-runner-scale-set` label to find issues specific to the new +implementation. + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | +| [helm](#requirement\_helm) | >= 2.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0, != 2.21.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.9.0 | +| [aws.ssm](#provider\_aws.ssm) | >= 4.9.0 | +| [kubernetes](#provider\_kubernetes) | >= 2.0, != 2.21.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [gha\_runner\_controller](#module\_gha\_runner\_controller) | cloudposse/helm-release/aws | 0.10.0 | +| [gha\_runners](#module\_gha\_runners) | cloudposse/helm-release/aws | 0.10.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [kubernetes_namespace.controller](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| [kubernetes_namespace.runner](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| [kubernetes_secret_v1.controller_image_pull_secret](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/secret_v1) | resource | +| [kubernetes_secret_v1.controller_ns_github_secret](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/secret_v1) | resource | +| [kubernetes_secret_v1.github_secret](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/secret_v1) | resource | +| [kubernetes_secret_v1.image_pull_secret](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/secret_v1) | resource | +| [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [aws_ssm_parameter.github_token](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.image_pull_secret](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | 
+|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [charts](#input\_charts) | Map of Helm charts to install. Keys are "controller" and "runner\_sets". |
map(object({
chart_version = string
chart = optional(string, null) # defaults according to the key to "gha-runner-scale-set-controller" or "gha-runner-scale-set"
chart_description = optional(string, null) # visible in Helm history
chart_repository = optional(string, "oci://ghcr.io/actions/actions-runner-controller-charts")
wait = optional(bool, true)
atomic = optional(bool, true)
cleanup_on_fail = optional(bool, true)
timeout = optional(number, null)
}))
| n/a | yes | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [controller](#input\_controller) | Configuration for the controller. |
object({
image = optional(object({
repository = optional(string, null)
tag = optional(string, null) # Defaults to the chart appVersion
pull_policy = optional(string, null)
}), null)
replicas = optional(number, 1)
kubernetes_namespace = string
create_namespace = optional(bool, true)
chart_values = optional(any, null)
affinity = optional(map(string), {})
labels = optional(map(string), {})
node_selector = optional(map(string), {})
priority_class_name = optional(string, "")
resources = optional(object({
limits = optional(object({
cpu = optional(string, null)
memory = optional(string, null)
}), null)
requests = optional(object({
cpu = optional(string, null)
memory = optional(string, null)
}), null)
}), null)
tolerations = optional(list(object({
key = string
operator = string
value = optional(string, null)
effect = string
})), [])
log_level = optional(string, "info")
log_format = optional(string, "json")
update_strategy = optional(string, "immediate")
})
| n/a | yes | +| [create\_github\_kubernetes\_secret](#input\_create\_github\_kubernetes\_secret) | If `true`, this component will create the Kubernetes Secret that will be used to get
the GitHub App private key or GitHub PAT token, based on the value retrieved
from SSM at the `var.ssm_github_secret_path`. WARNING: This will cause
the secret to be stored in plaintext in the Terraform state.
If `false`, this component will not create a secret and you must create it
(with the name given by `var.github_kubernetes_secret_name`) in every
namespace where you are deploying runners (the controller does not need it). | `bool` | `true` | no | +| [create\_image\_pull\_kubernetes\_secret](#input\_create\_image\_pull\_kubernetes\_secret) | If `true` and `image_pull_secret_enabled` is `true`, this component will create the Kubernetes image pull secret resource,
using the value in SSM at the path specified by `ssm_image_pull_secret_path`.
WARNING: This will cause the secret to be stored in plaintext in the Terraform state.
If `false`, this component will not create a secret and you must create it
(with the name given by `var.image_pull_kubernetes_secret_name`) in every
namespace where you are deploying controllers or runners. | `bool` | `true` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no |
+| [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no |
+| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no |
+| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no |
+| [github\_app\_id](#input\_github\_app\_id) | The ID of the GitHub App to use for the runner controller. Leave empty if using a GitHub PAT. | `string` | `null` | no |
+| [github\_app\_installation\_id](#input\_github\_app\_installation\_id) | The "Installation ID" of the GitHub App to use for the runner controller. Leave empty if using a GitHub PAT. | `string` | `null` | no |
+| [github\_kubernetes\_secret\_name](#input\_github\_kubernetes\_secret\_name) | Name of the Kubernetes Secret that will be used to get the GitHub App private key or GitHub PAT token. | `string` | `"gha-github-secret"` | no |
+| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can be seen in the plan | `bool` | `false` | no |
+| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [image\_pull\_kubernetes\_secret\_name](#input\_image\_pull\_kubernetes\_secret\_name) | Name of the Kubernetes Secret that will be used as the imagePullSecret. | `string` | `"gha-image-pull-secret"` | no | +| [image\_pull\_secret\_enabled](#input\_image\_pull\_secret\_enabled) | Whether to configure the controller and runners with an image pull secret. | `bool` | `false` | no | +| [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | +| [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | +| [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | +| [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | +| [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | +| [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | +| [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region. | `string` | n/a | yes | +| [runners](#input\_runners) | Map of Runner Scale Set configurations, with the key being the name of the runner set.
Please note that the name must be in kebab-case (no underscores).

For example:
```hcl
organization-runner = {
# Specify the scope (organization or repository) and the target
# of the runner via the `github_url` input.
# ex: https://github.com/myorg/myrepo or https://github.com/myorg
github_url = "https://github.com/myorg"
group = "core-automation" # Optional. Assigns the runners to a runner group, for access control.
min_replicas = 1
max_replicas = 5
}
```
|
map(object({
# we allow a runner to be disabled because Atmos cannot delete an inherited map object
enabled = optional(bool, true)
github_url = string
group = optional(string, null)
kubernetes_namespace = optional(string, null) # defaults to the controller's namespace
create_namespace = optional(bool, true)
image = optional(string, "ghcr.io/actions/actions-runner:latest") # repo and tag
mode = optional(string, "dind") # Optional. Can be "dind" or "kubernetes".
pod_labels = optional(map(string), {})
pod_annotations = optional(map(string), {})
affinity = optional(map(string), {})
node_selector = optional(map(string), {})
tolerations = optional(list(object({
key = string
operator = string
value = optional(string, null)
effect = string
# tolerationSeconds is not supported, because Terraform requires all objects in a list to have the same keys,
# but tolerationSeconds must be omitted to get the default behavior of "tolerate forever".
# If really needed, could use a default value of 1,000,000,000 (one billion seconds = about 32 years).
})), [])
min_replicas = number
max_replicas = number

# ephemeral_pvc_storage and _class are ignored for "dind" mode but required for "kubernetes" mode
ephemeral_pvc_storage = optional(string, null) # ex: 10Gi
ephemeral_pvc_storage_class = optional(string, null)

kubernetes_mode_service_account_annotations = optional(map(string), {})

resources = optional(object({
limits = optional(object({
cpu = optional(string, null)
memory = optional(string, null)
ephemeral-storage = optional(string, null)
}), null)
requests = optional(object({
cpu = optional(string, null)
memory = optional(string, null)
ephemeral-storage = optional(string, null)
}), null)
}), null)
}))
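
As a complement to the organization-level example above, here is a hedged sketch of a runner set entry using `mode = "kubernetes"`, which per the comments in the type definition requires the ephemeral PVC settings; the repository URL, namespace, storage class, and sizes are placeholders, not values prescribed by this component:

```hcl
repo-kubernetes-runner = {
  github_url           = "https://github.com/myorg/myrepo"
  mode                 = "kubernetes"
  kubernetes_namespace = "gha-runners" # assumed; defaults to the controller's namespace when null
  min_replicas         = 0
  max_replicas         = 10

  # Required for "kubernetes" mode, ignored for "dind" mode
  ephemeral_pvc_storage       = "10Gi"
  ephemeral_pvc_storage_class = "gp3" # assumed storage class name

  resources = {
    requests = { cpu = "500m", memory = "1Gi" }
  }
}
```
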
| `{}` | no | +| [ssm\_github\_secret\_path](#input\_ssm\_github\_secret\_path) | The path in SSM to the GitHub app private key file contents or GitHub PAT token. | `string` | `"/github-action-runners/github-auth-secret"` | no | +| [ssm\_image\_pull\_secret\_path](#input\_ssm\_image\_pull\_secret\_path) | SSM path to the base64 encoded `dockercfg` image pull secret. | `string` | `"/github-action-runners/image-pull-secrets"` | no | +| [ssm\_region](#input\_ssm\_region) | AWS Region where SSM secrets are stored. Defaults to `var.region`. | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [metadata](#output\_metadata) | Block status of the deployed release | +| [runners](#output\_runners) | Human-readable summary of the deployed runners | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/eks/actions-runner-controller) - + Cloud Posse's upstream component +- [alb-controller](https://artifacthub.io/packages/helm/aws/aws-load-balancer-controller) - Helm Chart +- [alb-controller](https://github.com/kubernetes-sigs/aws-load-balancer-controller) - AWS Load Balancer Controller +- [actions-runner-controller Webhook Driven Scaling](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/docs/detailed-docs.md#webhook-driven-scaling) +- [actions-runner-controller Chart Values](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/charts/actions-runner-controller/values.yaml) +- [How to set service account for workers spawned in Kubernetes mode](https://github.com/actions/actions-runner-controller/issues/2992#issuecomment-1764855221) + +[](https://cpco.io/component) diff --git a/modules/eks/github-actions-runner/context.tf b/modules/eks/github-actions-runner/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/eks/github-actions-runner/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/eks/github-actions-runner/main.tf b/modules/eks/github-actions-runner/main.tf new file mode 100644 index 000000000..6f4e74b92 --- /dev/null +++ b/modules/eks/github-actions-runner/main.tf @@ -0,0 +1,172 @@ +locals { + enabled = module.this.enabled + enabled_runners = { for k, v in var.runners : k => v if v.enabled && local.enabled } + + # Default chart names + controller_chart_name = "gha-runner-scale-set-controller" + runner_chart_name = "gha-runner-scale-set" + + image_pull_secret_enabled = local.enabled && var.image_pull_secret_enabled + create_image_pull_secret = local.image_pull_secret_enabled && var.create_image_pull_kubernetes_secret + image_pull_secret = one(data.aws_ssm_parameter.image_pull_secret[*].value) + image_pull_secret_name = var.image_pull_kubernetes_secret_name + + controller_namespace = var.controller.kubernetes_namespace + controller_namespace_set = toset([local.controller_namespace]) + runner_namespaces = toset([for v in values(local.enabled_runners) : coalesce(v.kubernetes_namespace, local.controller_namespace)]) + runner_only_namespaces = setsubtract(local.runner_namespaces, local.controller_namespace_set) + + # We have the possibility of several deployments to the same namespace, + # with some deployments configured to create the namespace and others not. + # We choose to create any namespace that is asked to be created, even if + # other deployments to the same namespace do not ask for it to be created. + all_runner_namespaces_to_create = local.enabled ? toset([ + for v in values(local.enabled_runners) : coalesce(v.kubernetes_namespace, local.controller_namespace) if v.create_namespace + ]) : [] + + # Potentially, the configuration calls for the controller's namespace to be created for the runner, + # even if the controller does not specify that its namespace be created. As before, + # we create the namespace if any deployment to the namespace asks for it to be created. 
+ # Here, however, we have to be careful to create the controller's namespace + # using the controller's namespace resource, even if the request came from the runner. + create_controller_namespace = local.enabled && (var.controller.create_namespace || contains(local.all_runner_namespaces_to_create, local.controller_namespace)) + runner_namespaces_to_create = setsubtract(local.all_runner_namespaces_to_create, local.controller_namespace_set) + + # github_secret_namespaces = local.enabled ? local.runner_namespaces : [] + # image_pull_secret_namespaces = setunion(local.controller_namespace, local.runner_namespaces) + +} + +data "aws_ssm_parameter" "image_pull_secret" { + count = local.create_image_pull_secret ? 1 : 0 + + name = var.ssm_image_pull_secret_path + with_decryption = true + provider = aws.ssm +} + +# We want to completely deploy the controller before deploying the runners, +# so we need separate resources for the controller and the runners, or +# else there will be a circular dependency as the runners depend on the controller +# and the controller resources are mixed in with the runners. +resource "kubernetes_namespace" "controller" { + for_each = local.create_controller_namespace ? local.controller_namespace_set : [] + + metadata { + name = each.value + } + + # During destroy, we may need the IAM role preserved in order to run finalizers + # which remove resources. This depends_on ensures that the IAM role is not + # destroyed until after the namespace is destroyed. + depends_on = [module.gha_runner_controller.service_account_role_unique_id] +} + + +resource "kubernetes_secret_v1" "controller_image_pull_secret" { + for_each = local.create_image_pull_secret ? local.controller_namespace_set : [] + + metadata { + name = local.image_pull_secret_name + namespace = each.value + } + + binary_data = { ".dockercfg" = local.image_pull_secret } + + type = "kubernetes.io/dockercfg" + + depends_on = [kubernetes_namespace.controller] +} + +resource "kubernetes_secret_v1" "controller_ns_github_secret" { + for_each = local.create_github_secret && contains(local.runner_namespaces, local.controller_namespace) ? local.controller_namespace_set : [] + + metadata { + name = local.github_secret_name + namespace = each.value + } + + data = local.github_secrets[local.github_app_enabled ? "app" : "pat"] + + depends_on = [kubernetes_namespace.controller] +} + + +module "gha_runner_controller" { + source = "cloudposse/helm-release/aws" + version = "0.10.0" + + chart = coalesce(var.charts["controller"].chart, local.controller_chart_name) + repository = var.charts["controller"].chart_repository + description = var.charts["controller"].chart_description + chart_version = var.charts["controller"].chart_version + wait = var.charts["controller"].wait + atomic = var.charts["controller"].atomic + cleanup_on_fail = var.charts["controller"].cleanup_on_fail + timeout = var.charts["controller"].timeout + + # We need the module to wait for the namespace to be created before creating + # resources in the namespace, but we need it to create the IAM role first, + # so we cannot directly depend on the namespace resources, because that + # would create a circular dependency. So instead we make the kubernetes + # namespace depend on the resource, while the service_account_namespace + # (which is used to create the IAM role) does not. 
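+  # The try() below resolves to the managed namespace resource when this component
+  # creates the namespace, which gives the Helm release an implicit dependency on that
+  # resource, and otherwise falls back to the plain namespace name.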
+ kubernetes_namespace = try(kubernetes_namespace.controller[local.controller_namespace].metadata[0].name, local.controller_namespace) + create_namespace_with_kubernetes = false + + eks_cluster_oidc_issuer_url = module.eks.outputs.eks_cluster_identity_oidc_issuer + + service_account_name = module.this.name + service_account_namespace = local.controller_namespace + + iam_role_enabled = false + + values = compact([ + # hardcoded values + try(file("${path.module}/resources/values-controller.yaml"), null), + # standard k8s object settings + yamlencode({ + fullnameOverride = module.this.name, + serviceAccount = { + name = module.this.name + }, + affinity = var.controller.affinity, + labels = var.controller.labels, + nodeSelector = var.controller.node_selector, + priorityClassName = var.controller.priority_class_name, + replicaCount = var.controller.replicas, + tolerations = var.controller.tolerations, + flags = { + logLevel = var.controller.log_level + logFormat = var.controller.log_format + updateStrategy = var.controller.update_strategy + } + }), + # filter out null values + var.controller.resources == null ? null : yamlencode({ + resources = merge( + try(var.controller.resources.requests, null) == null ? {} : { requests = { for k, v in var.controller.resources.requests : k => v if v != null } }, + try(var.controller.resources.limits, null) == null ? {} : { limits = { for k, v in var.controller.resources.limits : k => v if v != null } }, + ) + }), + var.controller.image == null ? null : yamlencode(merge( + try(var.controller.image.repository, null) == null ? {} : { repository = var.controller.image.repository }, + try(var.controller.image.tag, null) == null ? {} : { tag = var.controller.image.tag }, + try(var.controller.image.pull_policy, null) == null ? {} : { pullPolicy = var.controller.image.pull_policy }, + )), + local.image_pull_secret_enabled ? yamlencode({ + # We need to wait until the secret is created before creating the controller, + # but we cannot explicitly make the whole module depend on the secret, because + # the secret depends on the namespace, and the namespace depends on the IAM role created by the module, + # even if no IAM role is created (because Terraform uses static dependencies). + imagePullSecrets = [{ name = try(kubernetes_secret_v1.controller_image_pull_secret[local.controller_namespace].metadata[0].name, var.image_pull_kubernetes_secret_name) }] + }) : null, + # additional values + yamlencode(var.controller.chart_values) + ]) + + context = module.this.context + + # Cannot depend on the namespace directly, because that would create a circular dependency (see above) + # depends_on = [kubernetes_namespace.default] +} diff --git a/modules/eks/github-actions-runner/outputs.tf b/modules/eks/github-actions-runner/outputs.tf new file mode 100644 index 000000000..22f614166 --- /dev/null +++ b/modules/eks/github-actions-runner/outputs.tf @@ -0,0 +1,25 @@ +output "metadata" { + value = module.gha_runner_controller.metadata + description = "Block status of the deployed release" +} + +output "runners" { + value = { for k, v in local.enabled_runners : k => merge({ + "1) Kubernetes namespace" = coalesce(v.kubernetes_namespace, local.controller_namespace) + "2) Runner Group" = v.group + "3) Min Runners" = v.min_replicas + "4) Max Runners" = v.max_replicas + }, + length(v.node_selector) > 0 ? { + "?) Node Selector" = v.node_selector + } : {}, + length(v.tolerations) > 0 ? { + "?) Tolerations" = v.tolerations + } : {}, + length(v.affinity) > 0 ? { + "?) 
Affinity" = v.affinity + } : {}, + ) + } + description = "Human-readable summary of the deployed runners" +} diff --git a/modules/eks/github-actions-runner/provider-helm.tf b/modules/eks/github-actions-runner/provider-helm.tf new file mode 100644 index 000000000..91cc7f6d4 --- /dev/null +++ b/modules/eks/github-actions-runner/provider-helm.tf @@ -0,0 +1,201 @@ +################## +# +# This file is a drop-in to provide a helm provider. +# +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# +# All the following variables are just about configuring the Kubernetes provider +# to be able to modify EKS cluster. The reason there are so many options is +# because at various times, each one of them has had problems, so we give you a choice. +# +# The reason there are so many "enabled" inputs rather than automatically +# detecting whether or not they are enabled based on the value of the input +# is that any logic based on input values requires the values to be known during +# the "plan" phase of Terraform, and often they are not, which causes problems. +# +variable "kubeconfig_file_enabled" { + type = bool + default = false + description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false +} + +variable "kubeconfig_file" { + type = string + default = "" + description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false +} + +variable "kubeconfig_context" { + type = string + default = "" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false +} + +variable "kube_data_auth_enabled" { + type = bool + default = false + description = <<-EOT + If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. + EOT + nullable = false +} + +variable "kube_exec_auth_enabled" { + type = bool + default = true + description = <<-EOT + If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. 
+ EOT + nullable = false +} + +variable "kube_exec_auth_role_arn" { + type = string + default = "" + description = "The role ARN for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_role_arn_enabled" { + type = bool + default = true + description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false +} + +variable "kube_exec_auth_aws_profile" { + type = string + default = "" + description = "The AWS config profile for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_aws_profile_enabled" { + type = bool + default = false + description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false +} + +variable "kubeconfig_exec_auth_api_version" { + type = string + default = "client.authentication.k8s.io/v1beta1" + description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false +} + +variable "helm_manifest_experiment_enabled" { + type = bool + default = false + description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false +} + +locals { + kubeconfig_file_enabled = var.kubeconfig_file_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + + # Eventually we might try to get this from an environment variable + kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version + + exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? [ + "--profile", var.kube_exec_auth_aws_profile + ] : [] + + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) + exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ + "--role-arn", local.kube_exec_auth_role_arn + ] : [] + + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") +} + +data "aws_eks_cluster_auth" "eks" { + count = local.kube_data_auth_enabled ? 1 : 0 + name = local.eks_cluster_id +} + +provider "helm" { + kubernetes { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? 
one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } + } + experiments { + manifest = var.helm_manifest_experiment_enabled && module.this.enabled + } +} + +provider "kubernetes" { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } +} diff --git a/modules/eks/github-actions-runner/provider-ssm.tf b/modules/eks/github-actions-runner/provider-ssm.tf new file mode 100644 index 000000000..04e8b1d65 --- /dev/null +++ b/modules/eks/github-actions-runner/provider-ssm.tf @@ -0,0 +1,15 @@ +provider "aws" { + region = coalesce(var.ssm_region, var.region) + alias = "ssm" + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} diff --git a/modules/eks/github-actions-runner/providers.tf b/modules/eks/github-actions-runner/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/eks/github-actions-runner/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
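+    # compact() drops a null or empty ARN, so at most one assume_role block is rendered.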
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/eks/github-actions-runner/remote-state.tf b/modules/eks/github-actions-runner/remote-state.tf new file mode 100644 index 000000000..c1ec8226d --- /dev/null +++ b/modules/eks/github-actions-runner/remote-state.tf @@ -0,0 +1,8 @@ +module "eks" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.eks_component_name + + context = module.this.context +} diff --git a/modules/eks/github-actions-runner/runners.tf b/modules/eks/github-actions-runner/runners.tf new file mode 100644 index 000000000..5e21cf195 --- /dev/null +++ b/modules/eks/github-actions-runner/runners.tf @@ -0,0 +1,185 @@ +locals { + github_app_enabled = var.github_app_id != null && var.github_app_installation_id != null + create_github_secret = local.enabled && var.create_github_kubernetes_secret + github_secret_name = var.github_kubernetes_secret_name + + github_secrets = { + app = { + github_app_id = var.github_app_id + github_app_installation_id = var.github_app_installation_id + github_app_private_key = one(data.aws_ssm_parameter.github_token[*].value) + } + pat = { + github_token = one(data.aws_ssm_parameter.github_token[*].value) + } + } +} + +data "aws_ssm_parameter" "github_token" { + count = local.create_github_secret ? 1 : 0 + + name = var.ssm_github_secret_path + with_decryption = true + provider = aws.ssm +} + +resource "kubernetes_namespace" "runner" { + for_each = local.runner_namespaces_to_create + + metadata { + name = each.value + } + + # During destroy, we may need the IAM role preserved in order to run finalizers + # which remove resources. This depends_on ensures that the IAM role is not + # destroyed until after the namespace is destroyed. + depends_on = [module.gha_runners.service_account_role_unique_id] +} + +resource "kubernetes_secret_v1" "github_secret" { + for_each = local.create_github_secret ? local.runner_only_namespaces : [] + + metadata { + name = local.github_secret_name + namespace = each.value + } + + data = local.github_secrets[local.github_app_enabled ? "app" : "pat"] + + depends_on = [kubernetes_namespace.runner] +} + +resource "kubernetes_secret_v1" "image_pull_secret" { + for_each = local.create_image_pull_secret ? local.runner_only_namespaces : [] + + metadata { + name = local.image_pull_secret_name + namespace = each.value + } + + binary_data = { ".dockercfg" = local.image_pull_secret } + + type = "kubernetes.io/dockercfg" + + depends_on = [kubernetes_namespace.runner] +} + +module "gha_runners" { + for_each = local.enabled ? 
local.enabled_runners : {} + + source = "cloudposse/helm-release/aws" + version = "0.10.0" + + name = each.key + chart = coalesce(var.charts["runner_sets"].chart, local.runner_chart_name) + repository = var.charts["runner_sets"].chart_repository + description = var.charts["runner_sets"].chart_description + chart_version = var.charts["runner_sets"].chart_version + wait = var.charts["runner_sets"].wait + atomic = var.charts["runner_sets"].atomic + cleanup_on_fail = var.charts["runner_sets"].cleanup_on_fail + timeout = var.charts["runner_sets"].timeout + + kubernetes_namespace = coalesce(each.value.kubernetes_namespace, local.controller_namespace) + create_namespace = false # will be created above to manage duplicate namespaces + + eks_cluster_oidc_issuer_url = module.eks.outputs.eks_cluster_identity_oidc_issuer + + iam_role_enabled = false + + values = compact([ + # hardcoded values + try(file("${path.module}/resources/values-runner.yaml"), null), + yamlencode({ + githubConfigUrl = each.value.github_url + maxRunners = each.value.max_replicas + minRunners = each.value.min_replicas + runnerGroup = each.value.group + + # Create an explicit dependency on the secret to be sure it is created first. + githubConfigSecret = coalesce(each.value.kubernetes_namespace, local.controller_namespace) == local.controller_namespace ? ( + try(kubernetes_secret_v1.controller_ns_github_secret[local.controller_namespace].metadata[0].name, local.github_secret_name) + ) : ( + try(kubernetes_secret_v1.github_secret[each.value.kubernetes_namespace].metadata[0].name, local.github_secret_name) + ) + + containerMode = { + type = each.value.mode + kubernetesModeWorkVolumeClaim = { + accessModes = ["ReadWriteOnce"] + storageClassName = each.value.ephemeral_pvc_storage_class + resources = { + requests = { + storage = each.value.ephemeral_pvc_storage + } + } + } + kubernetesModeServiceAccount = { + annotations = each.value.kubernetes_mode_service_account_annotations + } + } + template = { + metadata = { + annotations = each.value.pod_annotations + labels = each.value.pod_labels + } + spec = merge( + local.image_pull_secret_enabled ? { + # We want to wait until the secret is created before creating the runner, + # but the secret might be the `controller_image_pull_secret`. That is O.K. + # because we separately depend on the controller, which depends on the secret. + imagePullSecrets = [{ name = try(kubernetes_secret_v1.image_pull_secret[each.value.kubernetes_namespace].metadata[0].name, var.image_pull_kubernetes_secret_name) }] + } : {}, + try(length(each.value.ephemeral_pvc_storage), 0) > 0 ? { + volumes = [{ + name = "work" + ephemeral = { + volumeClaimTemplate = { + spec = merge( + try(length(each.value.ephemeral_pvc_storage_class), 0) > 0 ? { + storageClassName = each.value.ephemeral_pvc_storage_class + } : {}, + { + accessModes = ["ReadWriteOnce"] + resources = { + requests = { + storage = each.value.ephemeral_pvc_storage + } + } + }) + } + } + }] + } : {}, + { + affinity = each.value.affinity + nodeSelector = each.value.node_selector + tolerations = each.value.tolerations + containers = [merge({ + name = "runner" + image = each.value.image + # command from https://github.com/actions/actions-runner-controller/blob/0bfa57ac504dfc818128f7185fc82830cbdb83f1/charts/gha-runner-scale-set/values.yaml#L193 + command = ["/home/runner/run.sh"] + }, + each.value.resources == null ? {} : { + resources = merge( + try(each.value.resources.requests, null) == null ? 
{} : { requests = { for k, v in each.value.resources.requests : k => v if v != null } }, + try(each.value.resources.limits, null) == null ? {} : { limits = { for k, v in each.value.resources.limits : k => v if v != null } }, + ) + }, + )] + } + ) + } + }), + local.image_pull_secret_enabled ? yamlencode({ + listenerTemplate = { + spec = { + imagePullSecrets = [{ name = try(kubernetes_secret_v1.image_pull_secret[each.value.kubernetes_namespace].metadata[0].name, var.image_pull_kubernetes_secret_name) }] + containers = [] + } } }) : null + ]) + + # Cannot depend on the namespace directly, because that would create a circular dependency (see above). + depends_on = [module.gha_runner_controller, kubernetes_secret_v1.controller_ns_github_secret] +} diff --git a/modules/eks/github-actions-runner/variables.tf b/modules/eks/github-actions-runner/variables.tf new file mode 100644 index 000000000..ee29149e3 --- /dev/null +++ b/modules/eks/github-actions-runner/variables.tf @@ -0,0 +1,223 @@ +variable "region" { + description = "AWS Region." + type = string +} + +variable "ssm_region" { + description = "AWS Region where SSM secrets are stored. Defaults to `var.region`." + type = string + default = null +} + +variable "eks_component_name" { + type = string + description = "The name of the eks component" + default = "eks/cluster" +} + +######## Helm Chart configurations + +variable "charts" { + description = "Map of Helm charts to install. Keys are \"controller\" and \"runner_sets\"." + type = map(object({ + chart_version = string + chart = optional(string, null) # defaults according to the key to "gha-runner-scale-set-controller" or "gha-runner-scale-set" + chart_description = optional(string, null) # visible in Helm history + chart_repository = optional(string, "oci://ghcr.io/actions/actions-runner-controller-charts") + wait = optional(bool, true) + atomic = optional(bool, true) + cleanup_on_fail = optional(bool, true) + timeout = optional(number, null) + })) + validation { + condition = length(keys(var.charts)) == 2 && contains(keys(var.charts), "controller") && contains(keys(var.charts), "runner_sets") + error_message = "Must have exactly two charts: \"controller\" and \"runner_sets\"." + } +} + +######## ImagePullSecret settings + +variable "image_pull_secret_enabled" { + type = bool + description = "Whether to configure the controller and runners with an image pull secret." + default = false +} + +variable "image_pull_kubernetes_secret_name" { + type = string + description = "Name of the Kubernetes Secret that will be used as the imagePullSecret." + default = "gha-image-pull-secret" + nullable = false +} + +variable "create_image_pull_kubernetes_secret" { + type = bool + description = <<-EOT + If `true` and `image_pull_secret_enabled` is `true`, this component will create the Kubernetes image pull secret resource, + using the value in SSM at the path specified by `ssm_image_pull_secret_path`. + WARNING: This will cause the secret to be stored in plaintext in the Terraform state. + If `false`, this component will not create a secret and you must create it + (with the name given by `var.github_kubernetes_secret_name`) in every + namespace where you are deploying controllers or runners. + EOT + default = true + nullable = false +} + +variable "ssm_image_pull_secret_path" { + type = string + description = "SSM path to the base64 encoded `dockercfg` image pull secret." 
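+  # The stored value is expected to be the base64-encoded contents of a Docker config
+  # (`.dockercfg`) file; this component places it in a `kubernetes.io/dockercfg` Secret
+  # that is referenced via `imagePullSecrets`.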
+ default = "/github-action-runners/image-pull-secrets" + nullable = false +} + +######## Controller-specific settings + +variable "controller" { + type = object({ + image = optional(object({ + repository = optional(string, null) + tag = optional(string, null) # Defaults to the chart appVersion + pull_policy = optional(string, null) + }), null) + replicas = optional(number, 1) + kubernetes_namespace = string + create_namespace = optional(bool, true) + chart_values = optional(any, null) + affinity = optional(map(string), {}) + labels = optional(map(string), {}) + node_selector = optional(map(string), {}) + priority_class_name = optional(string, "") + resources = optional(object({ + limits = optional(object({ + cpu = optional(string, null) + memory = optional(string, null) + }), null) + requests = optional(object({ + cpu = optional(string, null) + memory = optional(string, null) + }), null) + }), null) + tolerations = optional(list(object({ + key = string + operator = string + value = optional(string, null) + effect = string + })), []) + log_level = optional(string, "info") + log_format = optional(string, "json") + update_strategy = optional(string, "immediate") + }) + description = "Configuration for the controller." +} + + +######## Runner-specific settings + +variable "github_app_id" { + type = string + description = "The ID of the GitHub App to use for the runner controller. Leave empty if using a GitHub PAT." + default = null +} + +variable "github_app_installation_id" { + type = string + description = "The \"Installation ID\" of the GitHub App to use for the runner controller. Leave empty if using a GitHub PAT." + default = null +} + +variable "ssm_github_secret_path" { + type = string + description = "The path in SSM to the GitHub app private key file contents or GitHub PAT token." + default = "/github-action-runners/github-auth-secret" + nullable = false +} + +variable "create_github_kubernetes_secret" { + type = bool + description = <<-EOT + If `true`, this component will create the Kubernetes Secret that will be used to get + the GitHub App private key or GitHub PAT token, based on the value retrieved + from SSM at the `var.ssm_github_secret_path`. WARNING: This will cause + the secret to be stored in plaintext in the Terraform state. + If `false`, this component will not create a secret and you must create it + (with the name given by `var.github_kubernetes_secret_name`) in every + namespace where you are deploying runners (the controller does not need it). + EOT + default = true +} + +variable "github_kubernetes_secret_name" { + type = string + description = "Name of the Kubernetes Secret that will be used to get the GitHub App private key or GitHub PAT token." + default = "gha-github-secret" + nullable = false +} + + +variable "runners" { + description = <<-EOT + Map of Runner Scale Set configurations, with the key being the name of the runner set. + Please note that the name must be in kebab-case (no underscores). + + For example: + + ```hcl + organization-runner = { + # Specify the scope (organization or repository) and the target + # of the runner via the `github_url` input. + # ex: https://github.com/myorg/myrepo or https://github.com/myorg + github_url = https://github.com/myorg + group = "core-automation" # Optional. Assigns the runners to a runner group, for access control. 
+ min_replicas = 1 + max_replicas = 5 + } + ``` + EOT + + type = map(object({ + # we allow a runner to be disabled because Atmos cannot delete an inherited map object + enabled = optional(bool, true) + github_url = string + group = optional(string, null) + kubernetes_namespace = optional(string, null) # defaults to the controller's namespace + create_namespace = optional(bool, true) + image = optional(string, "ghcr.io/actions/actions-runner:latest") # repo and tag + mode = optional(string, "dind") # Optional. Can be "dind" or "kubernetes". + pod_labels = optional(map(string), {}) + pod_annotations = optional(map(string), {}) + affinity = optional(map(string), {}) + node_selector = optional(map(string), {}) + tolerations = optional(list(object({ + key = string + operator = string + value = optional(string, null) + effect = string + # tolerationSeconds is not supported, because Terraform requires all objects in a list to have the same keys, + # but tolerationSeconds must be omitted to get the default behavior of "tolerate forever". + # If really needed, could use a default value of 1,000,000,000 (one billion seconds = about 32 years). + })), []) + min_replicas = number + max_replicas = number + + # ephemeral_pvc_storage and _class are ignored for "dind" mode but required for "kubernetes" mode + ephemeral_pvc_storage = optional(string, null) # ex: 10Gi + ephemeral_pvc_storage_class = optional(string, null) + + kubernetes_mode_service_account_annotations = optional(map(string), {}) + + resources = optional(object({ + limits = optional(object({ + cpu = optional(string, null) + memory = optional(string, null) + ephemeral-storage = optional(string, null) + }), null) + requests = optional(object({ + cpu = optional(string, null) + memory = optional(string, null) + ephemeral-storage = optional(string, null) + }), null) + }), null) + })) + default = {} +} diff --git a/modules/eks/github-actions-runner/versions.tf b/modules/eks/github-actions-runner/versions.tf new file mode 100644 index 000000000..f4e52c7b2 --- /dev/null +++ b/modules/eks/github-actions-runner/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.3.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.9.0" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0, != 2.21.0" + } + } +} diff --git a/modules/eks/idp-roles/README.md b/modules/eks/idp-roles/README.md index d59e8f6bd..2f92cd320 100644 --- a/modules/eks/idp-roles/README.md +++ b/modules/eks/idp-roles/README.md @@ -1,6 +1,15 @@ +--- +tags: + - component/eks/idp-roles + - layer/eks + - provider/aws + - provider/helm +--- + # Component: `eks/idp-roles` -This component installs the `idp-roles` for EKS clusters. These identity provider roles specify severl pre-determined permission levels for cluster users and come with bindings that make them easy to assign to Users and Groups. +This component installs the `idp-roles` for EKS clusters. These identity provider roles specify several pre-determined +permission levels for cluster users and come with bindings that make them easy to assign to Users and Groups. 
## Usage @@ -21,6 +30,7 @@ components: kubeconfig_exec_auth_api_version: "client.authentication.k8s.io/v1beta1" ``` + ## Requirements @@ -29,6 +39,7 @@ components: | [terraform](#requirement\_terraform) | >= 1.0.0 | | [aws](#requirement\_aws) | >= 4.0 | | [helm](#requirement\_helm) | >= 2.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.14.0, != 2.21.0 | ## Providers @@ -40,18 +51,16 @@ components: | Name | Source | Version | |------|--------|---------| -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.1.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | -| [idp\_roles](#module\_idp\_roles) | cloudposse/helm-release/aws | 0.6.0 | +| [idp\_roles](#module\_idp\_roles) | cloudposse/helm-release/aws | 0.10.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources | Name | Type | |------|------| -| [aws_eks_cluster.kubernetes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | | [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -| [aws_eks_cluster_auth.kubernetes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | ## Inputs @@ -72,18 +81,17 @@ components: | [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `true` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | | [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | | [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | | [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | -| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | -| [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1alpha1"` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | +| [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | | [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | | [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | | [kubernetes\_namespace](#input\_kubernetes\_namespace) | Kubernetes namespace to install the release into | `string` | `"kube-system"` | no | @@ -108,6 +116,8 @@ components: |------|-------------| | [metadata](#output\_metadata) | Block status of the deployed release | + ## References -* https://kubernetes.io/docs/reference/access-authn-authz/authentication/ + +- https://kubernetes.io/docs/reference/access-authn-authz/authentication/ diff --git a/modules/eks/idp-roles/charts/idp-roles/Chart.yaml b/modules/eks/idp-roles/charts/idp-roles/Chart.yaml index 35b5bbfae..19b759c5d 100644 --- a/modules/eks/idp-roles/charts/idp-roles/Chart.yaml +++ b/modules/eks/idp-roles/charts/idp-roles/Chart.yaml @@ -15,10 +15,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 +version: 0.2.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "0.1.0" +appVersion: "0.2.0" diff --git a/modules/eks/idp-roles/charts/idp-roles/templates/clusterrole-reader-extra.yaml b/modules/eks/idp-roles/charts/idp-roles/templates/clusterrole-reader-extra.yaml new file mode 100644 index 000000000..2e7d454db --- /dev/null +++ b/modules/eks/idp-roles/charts/idp-roles/templates/clusterrole-reader-extra.yaml @@ -0,0 +1,42 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: "{{ .Values.reader_cluster_role }}-extra" + labels: + rbac.authorization.k8s.io/aggregate-to-reader: "true" +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - list + - get + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - get + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - list + - get + - apiGroups: + - karpenter.k8s.aws + resources: + - ec2nodeclasses + verbs: + - list + - get + - apiGroups: + - karpenter.sh + resources: + - nodepools + verbs: + - list + - get diff --git a/modules/eks/idp-roles/charts/idp-roles/templates/clusterrole-reader.yaml b/modules/eks/idp-roles/charts/idp-roles/templates/clusterrole-reader.yaml new file mode 100644 index 000000000..2e536dfb2 --- /dev/null +++ b/modules/eks/idp-roles/charts/idp-roles/templates/clusterrole-reader.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Values.reader_cluster_role | quote }} +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + - matchLabels: + rbac.authorization.k8s.io/aggregate-to-observer: "true" + - matchLabels: + rbac.authorization.k8s.io/aggregate-to-reader: "true" diff --git a/modules/eks/idp-roles/charts/idp-roles/templates/clusterrolebinding-reader.yaml b/modules/eks/idp-roles/charts/idp-roles/templates/clusterrolebinding-reader.yaml new file mode 100644 index 000000000..2723b9d7e --- /dev/null +++ b/modules/eks/idp-roles/charts/idp-roles/templates/clusterrolebinding-reader.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Values.reader_crb_name | quote }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Values.reader_cluster_role | quote }} +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: {{ .Values.reader_client_role | quote }} +- apiGroup: rbac.authorization.k8s.io + kind: User + name: {{ .Values.reader_client_role | quote }} diff --git a/modules/eks/idp-roles/charts/idp-roles/values.yaml b/modules/eks/idp-roles/charts/idp-roles/values.yaml index 6d4ef2192..af8066ecc 100644 --- a/modules/eks/idp-roles/charts/idp-roles/values.yaml +++ b/modules/eks/idp-roles/charts/idp-roles/values.yaml @@ -27,3 +27,8 @@ poweruser_client_role: "idp:poweruser" observer_crb_name: "idp-observer" observer_cluster_role: "idp-observer" observer_client_role: "idp:observer" + +# Reader +reader_crb_name: "idp-reader" +reader_cluster_role: "idp-reader" +reader_client_role: "idp:reader" diff --git a/modules/eks/idp-roles/main.tf b/modules/eks/idp-roles/main.tf index 7103ea5af..2957c35ed 100644 --- a/modules/eks/idp-roles/main.tf +++ b/modules/eks/idp-roles/main.tf @@ -4,7 +4,7 @@ locals { module "idp_roles" { source = "cloudposse/helm-release/aws" - version = "0.6.0" + version = "0.10.0" # Required arguments name = module.this.name diff --git a/modules/eks/idp-roles/provider-helm.tf b/modules/eks/idp-roles/provider-helm.tf index d04bccf3d..91cc7f6d4 100644 --- 
a/modules/eks/idp-roles/provider-helm.tf +++ b/modules/eks/idp-roles/provider-helm.tf @@ -2,6 +2,12 @@ # # This file is a drop-in to provide a helm provider. # +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# # All the following variables are just about configuring the Kubernetes provider # to be able to modify EKS cluster. The reason there are so many options is # because at various times, each one of them has had problems, so we give you a choice. @@ -15,18 +21,35 @@ variable "kubeconfig_file_enabled" { type = bool default = false description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false } variable "kubeconfig_file" { type = string default = "" description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false } variable "kubeconfig_context" { type = string default = "" - description = "Context to choose from the Kubernetes kube config file" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false } variable "kube_data_auth_enabled" { @@ -36,6 +59,7 @@ variable "kube_data_auth_enabled" { If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_enabled" { @@ -45,48 +69,62 @@ variable "kube_exec_auth_enabled" { If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. 
EOT + nullable = false } variable "kube_exec_auth_role_arn" { type = string default = "" description = "The role ARN for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_role_arn_enabled" { type = bool default = true description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false } variable "kube_exec_auth_aws_profile" { type = string default = "" description = "The AWS config profile for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_aws_profile_enabled" { type = bool default = false description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false } variable "kubeconfig_exec_auth_api_version" { type = string - default = "client.authentication.k8s.io/v1alpha1" + default = "client.authentication.k8s.io/v1beta1" description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false } variable "helm_manifest_experiment_enabled" { type = bool - default = true + default = false description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false } locals { kubeconfig_file_enabled = var.kubeconfig_file_enabled - kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled - kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled # Eventually we might try to get this from an environment variable kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version @@ -95,14 +133,17 @@ locals { "--profile", var.kube_exec_auth_aws_profile ] : [] - kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, var.import_role_arn, module.iam_roles.terraform_role_arn) + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ "--role-arn", local.kube_exec_auth_role_arn ] : [] - certificate_authority_data = module.eks.outputs.eks_cluster_certificate_authority_data - eks_cluster_id = module.eks.outputs.eks_cluster_id - eks_cluster_endpoint = module.eks.outputs.eks_cluster_endpoint + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? 
null : try(module.eks.outputs.eks_cluster_endpoint, "") } data "aws_eks_cluster_auth" "eks" { @@ -113,15 +154,16 @@ data "aws_eks_cluster_auth" "eks" { provider "helm" { kubernetes { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) - token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" @@ -132,21 +174,22 @@ provider "helm" { } } experiments { - manifest = var.helm_manifest_experiment_enabled + manifest = var.helm_manifest_experiment_enabled && module.this.enabled } } provider "kubernetes" { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) - token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" diff --git a/modules/eks/idp-roles/providers.tf b/modules/eks/idp-roles/providers.tf index 2775903d2..89ed50a98 100644 --- a/modules/eks/idp-roles/providers.tf +++ b/modules/eks/idp-roles/providers.tf @@ -1,11 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. 
+ profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -14,27 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} - -data "aws_eks_cluster" "kubernetes" { - count = local.enabled ? 1 : 0 - - name = module.eks.outputs.eks_cluster_id -} - -data "aws_eks_cluster_auth" "kubernetes" { - count = local.enabled ? 1 : 0 - - name = module.eks.outputs.eks_cluster_id -} diff --git a/modules/eks/idp-roles/remote-state.tf b/modules/eks/idp-roles/remote-state.tf index 89a89a442..c1ec8226d 100644 --- a/modules/eks/idp-roles/remote-state.tf +++ b/modules/eks/idp-roles/remote-state.tf @@ -1,9 +1,8 @@ module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.1.0" + version = "1.5.0" component = var.eks_component_name context = module.this.context } - diff --git a/modules/eks/idp-roles/versions.tf b/modules/eks/idp-roles/versions.tf index cbf605948..ec64f8a4f 100644 --- a/modules/eks/idp-roles/versions.tf +++ b/modules/eks/idp-roles/versions.tf @@ -10,5 +10,9 @@ terraform { source = "hashicorp/helm" version = ">= 2.0" } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.14.0, != 2.21.0" + } } } diff --git a/modules/eks/karpenter-node-pool/CHANGELOG.md b/modules/eks/karpenter-node-pool/CHANGELOG.md new file mode 100644 index 000000000..c8402e80e --- /dev/null +++ b/modules/eks/karpenter-node-pool/CHANGELOG.md @@ -0,0 +1,17 @@ +## Release 1.470.0 + +Components PR [#1076](https://github.com/cloudposse/terraform-aws-components/pull/1076) + +- Allow specifying elements of `spec.template.spec.kubelet` +- Make taint values optional + +The `var.node_pools` map now includes a `kubelet` field that allows specifying elements of `spec.template.spec.kubelet`. +This is useful for configuring the kubelet to use custom settings, such as reserving resources for system daemons. + +For more information, see: + +- [Karpenter documentation](https://karpenter.sh/docs/concepts/nodepools/#spectemplatespeckubelet) +- [Kubernetes documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/) + +The `value` fields of the `taints` and `startup_taints` lists in the `var.node_pools` map are now optional. This is in +alignment with the Kubernetes API, where `key` and `effect` are required, but the `value` field is optional. diff --git a/modules/eks/karpenter-node-pool/README.md b/modules/eks/karpenter-node-pool/README.md new file mode 100644 index 000000000..8bfefb308 --- /dev/null +++ b/modules/eks/karpenter-node-pool/README.md @@ -0,0 +1,240 @@ +--- +tags: + - component/eks/karpenter-node-pool + - layer/eks + - provider/aws + - provider/helm +--- + +# Component: `eks/karpenter-node-pool` + +This component deploys [Karpenter NodePools](https://karpenter.sh/docs/concepts/nodepools/) to an EKS cluster. + +Karpenter is still in v0 and rapidly evolving. 
At this time, this component only supports a subset of the features +available in Karpenter. Support could be added for additional features as needed. + +Not supported: + +- Elements of NodePool: + - [`template.spec.kubelet`](https://karpenter.sh/docs/concepts/nodepools/#spectemplatespeckubelet) + - [`limits`](https://karpenter.sh/docs/concepts/nodepools/#limits) currently only supports `cpu` and `memory`. Other + limits such as `nvidia.com/gpu` are not supported. +- Elements of NodeClass: + - `subnetSelectorTerms`. This component only supports selecting all public or all private subnets of the referenced + EKS cluster. + - `securityGroupSelectorTerms`. This component only supports selecting the security group of the referenced EKS + cluster. + - `amiSelectorTerms`. Such terms override the `amiFamily` setting, which is the only AMI selection supported by this + component. + - `instanceStorePolicy` + - `userData` + - `detailedMonitoring` + - `associatePublicIPAddress` + +## Usage + +**Stack Level**: Regional + +If provisioning more than one NodePool, it is +[best practice](https://aws.github.io/aws-eks-best-practices/karpenter/#creating-nodepools) to create NodePools that are +mutually exclusive or weighted. + +```yaml +components: + terraform: + eks/karpenter-node-pool: + settings: + spacelift: + workspace_enabled: true + vars: + enabled: true + eks_component_name: eks/cluster + name: "karpenter-node-pool" + # https://karpenter.sh/v0.36.0/docs/concepts/nodepools/ + node_pools: + default: + name: default + # Whether to place EC2 instances launched by Karpenter into VPC private subnets. Set it to `false` to use public subnets + private_subnets_enabled: true + disruption: + consolidation_policy: WhenUnderutilized + consolidate_after: 1h + max_instance_lifetime: 336h + budgets: + # This budget allows 0 disruptions during business hours (from 9am to 5pm) on weekdays + - schedule: "0 9 * * mon-fri" + duration: 8h + nodes: "0" + # The total cpu of the cluster. Maps to spec.limits.cpu in the Karpenter NodeClass + total_cpu_limit: "100" + # The total memory of the cluster. Maps to spec.limits.memory in the Karpenter NodeClass + total_memory_limit: "1000Gi" + # The weight of the node pool. See https://karpenter.sh/docs/concepts/scheduling/#weighted-nodepools + weight: 50 + # Taints to apply to the nodes in the node pool. See https://karpenter.sh/docs/concepts/nodeclasses/#spectaints + taints: + - key: "node.kubernetes.io/unreachable" + effect: "NoExecute" + value: "true" + # Taints to apply to the nodes in the node pool at startup. See https://karpenter.sh/docs/concepts/nodeclasses/#specstartuptaints + startup_taints: + - key: "node.kubernetes.io/unreachable" + effect: "NoExecute" + value: "true" + # Metadata options for the node pool. See https://karpenter.sh/docs/concepts/nodeclasses/#specmetadataoptions + metadata_options: + httpEndpoint: "enabled" # allows the node to call the AWS metadata service + httpProtocolIPv6: "disabled" + httpPutResponseHopLimit: 2 + httpTokens: "required" + # The AMI used by Karpenter provisioner when provisioning nodes. Based on the value set for amiFamily, Karpenter will automatically query for the appropriate EKS optimized AMI via AWS Systems Manager (SSM) + # Bottlerocket, AL2, Ubuntu + # https://karpenter.sh/v0.18.0/aws/provisioning/#amazon-machine-image-ami-family + ami_family: AL2 + # Karpenter provisioner block device mappings. 
+ block_device_mappings: + - deviceName: /dev/xvda + ebs: + volumeSize: 200Gi + volumeType: gp3 + encrypted: true + deleteOnTermination: true + # Set acceptable (In) and unacceptable (Out) Kubernetes and Karpenter values for node provisioning based on + # Well-Known Labels and cloud-specific settings. These can include instance types, zones, computer architecture, + # and capacity type (such as AWS spot or on-demand). + # See https://karpenter.sh/v0.18.0/provisioner/#specrequirements for more details + requirements: + - key: "karpenter.sh/capacity-type" + operator: "In" + values: + - "on-demand" + - "spot" + - key: "node.kubernetes.io/instance-type" + operator: "In" + # See https://aws.amazon.com/ec2/instance-explorer/ and https://aws.amazon.com/ec2/instance-types/ + # Values limited by DenyEC2InstancesWithoutEncryptionInTransit service control policy + # See https://github.com/cloudposse/terraform-aws-service-control-policies/blob/master/catalog/ec2-policies.yaml + # Karpenter recommends allowing at least 20 instance types to ensure availability. + values: + - "c5n.2xlarge" + - "c5n.xlarge" + - "c5n.large" + - "c6i.2xlarge" + - "c6i.xlarge" + - "c6i.large" + - "m5n.2xlarge" + - "m5n.xlarge" + - "m5n.large" + - "m5zn.2xlarge" + - "m5zn.xlarge" + - "m5zn.large" + - "m6i.2xlarge" + - "m6i.xlarge" + - "m6i.large" + - "r5n.2xlarge" + - "r5n.xlarge" + - "r5n.large" + - "r6i.2xlarge" + - "r6i.xlarge" + - "r6i.large" + - key: "kubernetes.io/arch" + operator: "In" + values: + - "amd64" +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | +| [helm](#requirement\_helm) | >= 2.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.7.1, != 2.21.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.9.0 | +| [kubernetes](#provider\_kubernetes) | >= 2.7.1, != 2.21.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | + +## Resources + +| Name | Type | +|------|------| +| [kubernetes_manifest.ec2_node_class](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/manifest) | resource | +| [kubernetes_manifest.node_pool](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/manifest) | resource | +| [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | +| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | +| [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | +| [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | +| [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | +| [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | +| [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | +| [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | +| [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [node\_pools](#input\_node\_pools) | Configuration for node pools. See code for details. |
map(object({
# The name of the Karpenter provisioner. The map key is used if this is not set.
name = optional(string)
# Whether to place EC2 instances launched by Karpenter into VPC private subnets. Set it to `false` to use public subnets.
private_subnets_enabled = bool
# The Disruption spec controls how Karpenter scales down the node group.
# See the example (sadly not the specific `spec.disruption` documentation) at https://karpenter.sh/docs/concepts/nodepools/ for details
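# A minimal illustrative value (an assumption for illustration, not a module default):
# disruption = {
#   consolidation_policy = "WhenEmpty"
#   consolidate_after    = "30m"
#   budgets              = [{ nodes = "10%" }]
# }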
disruption = optional(object({
# Describes which types of Nodes Karpenter should consider for consolidation.
# If using 'WhenUnderutilized', Karpenter will consider all nodes for consolidation and attempt to remove or
# replace Nodes when it discovers that the Node is underutilized and could be changed to reduce cost.
# If using `WhenEmpty`, Karpenter will only consider nodes for consolidation that contain no workload pods.
consolidation_policy = optional(string, "WhenUnderutilized")

# The amount of time Karpenter should wait after discovering a consolidation decision (`go` duration string, s, m, or h).
# This value can currently (v0.36.0) only be set when the consolidationPolicy is 'WhenEmpty'.
# You can choose to disable consolidation entirely by setting the string value 'Never' here.
# Earlier versions of Karpenter called this field `ttl_seconds_after_empty`.
consolidate_after = optional(string)

# The amount of time a Node can live on the cluster before being removed (`go` duration string, s, m, or h).
# You can choose to disable expiration entirely by setting the string value 'Never' here.
# This module sets a default of 336 hours (14 days), while the Karpenter default is 720 hours (30 days).
# Note that Karpenter calls this field "expiresAfter", and earlier versions called it `ttl_seconds_until_expired`,
# but we call it "max_instance_lifetime" to match the corresponding field in EC2 Auto Scaling Groups.
max_instance_lifetime = optional(string, "336h")

# Budgets control the maximum number of NodeClaims owned by this NodePool that can be terminating at once.
# See https://karpenter.sh/docs/concepts/disruption/#disruption-budgets for details.
# A percentage is the percentage of the total number of active, ready nodes not being deleted, rounded up.
# If there are multiple active budgets, Karpenter uses the most restrictive value.
# If left undefined, this will default to one budget with a value of nodes: 10%.
# Note that budgets do not prevent or limit involuntary terminations.
# Example:
# On Weekdays during business hours, don't do any deprovisioning.
#   budgets = [{
#     schedule = "0 9 * * mon-fri"
#     duration = "8h"
#     nodes    = "0"
#   }]
budgets = optional(list(object({
# The schedule specifies when a budget begins being active, using extended cronjob syntax.
# See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax for syntax details.
# Timezones are not supported. This field is required if Duration is set.
schedule = optional(string)
# Duration determines how long a Budget is active after each Scheduled start.
# If omitted, the budget is always active. This is required if Schedule is set.
# Must be a whole number of minutes and hours, as cron does not work in seconds,
# but since Go's `duration.String()` always adds a "0s" at the end, that is allowed.
duration = optional(string)
# The percentage or number of nodes that Karpenter can scale down during the budget.
nodes = string
})), [])
}), {})
# Karpenter provisioner total CPU limit for all pods running on the EC2 instances launched by Karpenter
total_cpu_limit = string
# Karpenter provisioner total memory limit for all pods running on the EC2 instances launched by Karpenter
total_memory_limit = string
# Set a weight for this node pool.
# See https://karpenter.sh/docs/concepts/scheduling/#weighted-nodepools
weight = optional(number, 50)
labels = optional(map(string))
annotations = optional(map(string))
# Karpenter provisioner taints configuration. See https://aws.github.io/aws-eks-best-practices/karpenter/#create-provisioners-that-are-mutually-exclusive for more details
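# Illustrative example (hypothetical taint key, not a default):
# taints = [{ key = "dedicated", value = "gpu", effect = "NoSchedule" }]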
taints = optional(list(object({
key = string
effect = string
value = optional(string)
})))
startup_taints = optional(list(object({
key = string
effect = string
value = optional(string)
})))
# Karpenter node metadata options. See https://karpenter.sh/docs/concepts/nodeclasses/#specmetadataoptions for more details
metadata_options = optional(object({
httpEndpoint = optional(string, "enabled")
httpProtocolIPv6 = optional(string, "disabled")
httpPutResponseHopLimit = optional(number, 2)
# httpTokens can be either "required" or "optional"
httpTokens = optional(string, "required")
}), {})
# The AMI used by Karpenter provisioner when provisioning nodes. Based on the value set for amiFamily, Karpenter will automatically query for the appropriate EKS optimized AMI via AWS Systems Manager (SSM)
ami_family = string
# Karpenter nodes block device mappings. Controls the Elastic Block Storage volumes that Karpenter attaches to provisioned nodes.
# Karpenter uses default block device mappings for the AMI Family specified.
# For example, the Bottlerocket AMI Family defaults to two block device mappings,
# and normally you only want to scale `/dev/xvdb`, where containers and their storage are stored.
# Most other AMIs only have one device mapping at `/dev/xvda`.
# See https://karpenter.sh/docs/concepts/nodeclasses/#specblockdevicemappings for more details
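# Illustrative example (sizes and types are assumptions, not defaults):
# block_device_mappings = [{
#   deviceName = "/dev/xvda"
#   ebs = {
#     volumeSize = "100Gi"
#     volumeType = "gp3"
#   }
# }]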
block_device_mappings = list(object({
deviceName = string
ebs = optional(object({
volumeSize = string
volumeType = string
deleteOnTermination = optional(bool, true)
encrypted = optional(bool, true)
iops = optional(number)
kmsKeyID = optional(string, "alias/aws/ebs")
snapshotID = optional(string)
throughput = optional(number)
}))
}))
# Set acceptable (In) and unacceptable (Out) Kubernetes and Karpenter values for node provisioning based on Well-Known Labels and cloud-specific settings. These can include instance types, zones, CPU architecture, and capacity type (such as AWS spot or on-demand). See https://karpenter.sh/v0.18.0/provisioner/#specrequirements for more details
requirements = list(object({
key = string
operator = string
# Operators like "Exists" and "DoesNotExist" do not require a value
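# e.g. (illustrative): { key = "kubernetes.io/os", operator = "Exists" }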
values = optional(list(string))
}))
# Any values for spec.template.spec.kubelet allowed by Karpenter.
# Not fully specified, because they are subject to change.
# See:
# https://karpenter.sh/docs/concepts/nodepools/#spectemplatespeckubelet
# https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/
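# Illustrative example (field names follow the Karpenter kubelet spec; values are assumptions):
# kubelet = {
#   maxPods        = 110
#   systemReserved = { cpu = "100m", memory = "100Mi" }
# }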
kubelet = optional(any, {})
}))
| n/a | yes | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [ec2\_node\_classes](#output\_ec2\_node\_classes) | Deployed Karpenter EC2NodeClass | +| [node\_pools](#output\_node\_pools) | Deployed Karpenter NodePool | + + + +## References + +- https://karpenter.sh +- https://aws.github.io/aws-eks-best-practices/karpenter +- https://karpenter.sh/docs/concepts/nodepools +- https://aws.amazon.com/blogs/aws/introducing-karpenter-an-open-source-high-performance-kubernetes-cluster-autoscaler +- https://github.com/aws/karpenter +- https://ec2spotworkshops.com/karpenter.html +- https://www.eksworkshop.com/docs/autoscaling/compute/karpenter/ + +[](https://cpco.io/component) diff --git a/modules/eks/karpenter-node-pool/context.tf b/modules/eks/karpenter-node-pool/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/eks/karpenter-node-pool/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/eks/karpenter-node-pool/ec2-node-class.tf b/modules/eks/karpenter-node-pool/ec2-node-class.tf new file mode 100644 index 000000000..7be308615 --- /dev/null +++ b/modules/eks/karpenter-node-pool/ec2-node-class.tf @@ -0,0 +1,53 @@ +# This provisions the EC2NodeClass for the NodePool. +# https://karpenter.sh/docs/concepts/nodeclasses/ +# +# We keep it separate from the NodePool creation, +# even though there is a 1-to-1 mapping between the two, +# to make it a little easier to compare the implementation here +# with the Karpenter documentation, and to track changes as +# Karpenter evolves. +# + + +locals { + # If you include a field but set it to null, the field will be omitted from the Kubernetes resource, + # but the Kubernetes provider will still try to include it with a null value, + # which will cause perpetual diff in the Terraform plan. + # We strip out the null values from block_device_mappings here, because it is too complicated to do inline. + node_block_device_mappings = { for pk, pv in local.node_pools : pk => [ + for i, map in pv.block_device_mappings : merge({ + for dk, dv in map : dk => dv if dk != "ebs" && dv != null + }, try(length(map.ebs), 0) == 0 ? {} : { ebs = { for ek, ev in map.ebs : ek => ev if ev != null } }) + ] + } +} + +# https://karpenter.sh/docs/concepts/nodeclasses/ +resource "kubernetes_manifest" "ec2_node_class" { + for_each = local.node_pools + + manifest = { + apiVersion = "karpenter.k8s.aws/v1beta1" + kind = "EC2NodeClass" + metadata = { + name = coalesce(each.value.name, each.key) + } + spec = merge({ + role = module.eks.outputs.karpenter_iam_role_name + subnetSelectorTerms = [for id in(each.value.private_subnets_enabled ? 
local.private_subnet_ids : local.public_subnet_ids) : { + id = id + }] + securityGroupSelectorTerms = [{ + tags = { + "aws:eks:cluster-name" = local.eks_cluster_id + } + }] + # https://karpenter.sh/v0.18.0/aws/provisioning/#amazon-machine-image-ami-family + amiFamily = each.value.ami_family + metadataOptions = each.value.metadata_options + tags = module.this.tags + }, try(length(local.node_block_device_mappings[each.key]), 0) == 0 ? {} : { + blockDeviceMappings = local.node_block_device_mappings[each.key] + }) + } +} diff --git a/modules/eks/karpenter-node-pool/main.tf b/modules/eks/karpenter-node-pool/main.tf new file mode 100644 index 000000000..d43d8d2ac --- /dev/null +++ b/modules/eks/karpenter-node-pool/main.tf @@ -0,0 +1,82 @@ +# Create Provisioning Configuration +# https://karpenter.sh/docs/concepts/ + +locals { + enabled = module.this.enabled + + private_subnet_ids = module.vpc.outputs.private_subnet_ids + public_subnet_ids = module.vpc.outputs.public_subnet_ids + + node_pools = { for k, v in var.node_pools : k => v if local.enabled } + kubelets_specs_filtered = { for k, v in local.node_pools : k => { + for kk, vv in v.kubelet : kk => vv if vv != null + } + } + kubelet_specs = { for k, v in local.kubelets_specs_filtered : k => v if length(v) > 0 } +} + +# https://karpenter.sh/docs/concepts/nodepools/ + +resource "kubernetes_manifest" "node_pool" { + for_each = local.node_pools + + manifest = { + apiVersion = "karpenter.sh/v1beta1" + kind = "NodePool" + metadata = { + name = coalesce(each.value.name, each.key) + } + spec = { + limits = { + cpu = each.value.total_cpu_limit + memory = each.value.total_memory_limit + } + weight = each.value.weight + disruption = merge({ + consolidationPolicy = each.value.disruption.consolidation_policy + expireAfter = each.value.disruption.max_instance_lifetime + }, + each.value.disruption.consolidate_after == null ? {} : { + consolidateAfter = each.value.disruption.consolidate_after + }, + length(each.value.disruption.budgets) == 0 ? {} : { + budgets = each.value.disruption.budgets + } + ) + template = { + metadata = { + labels = coalesce(each.value.labels, {}) + annotations = coalesce(each.value.annotations, {}) + } + spec = merge({ + nodeClassRef = { + apiVersion = "karpenter.k8s.aws/v1beta1" + kind = "EC2NodeClass" + name = coalesce(each.value.name, each.key) + } + }, + try(length(each.value.requirements), 0) == 0 ? {} : { + requirements = [for r in each.value.requirements : merge({ + key = r.key + operator = r.operator + }, + try(length(r.values), 0) == 0 ? {} : { + values = r.values + })] + }, + try(length(each.value.taints), 0) == 0 ? {} : { + taints = each.value.taints + }, + try(length(each.value.startup_taints), 0) == 0 ? {} : { + startupTaints = each.value.startup_taints + }, + try(local.kubelet_specs[each.key], null) == null ? 
{} : { + kubelet = local.kubelet_specs[each.key] + } + ) + } + } + } + + depends_on = [kubernetes_manifest.ec2_node_class] +} diff --git a/modules/eks/karpenter-node-pool/outputs.tf b/modules/eks/karpenter-node-pool/outputs.tf new file mode 100644 index 000000000..507f516cc --- /dev/null +++ b/modules/eks/karpenter-node-pool/outputs.tf @@ -0,0 +1,9 @@ +output "node_pools" { + value = kubernetes_manifest.node_pool + description = "Deployed Karpenter NodePool" +} + +output "ec2_node_classes" { + value = kubernetes_manifest.ec2_node_class + description = "Deployed Karpenter EC2NodeClass" +} diff --git a/modules/eks/karpenter-node-pool/provider-helm.tf b/modules/eks/karpenter-node-pool/provider-helm.tf new file mode 100644 index 000000000..91cc7f6d4 --- /dev/null +++ b/modules/eks/karpenter-node-pool/provider-helm.tf @@ -0,0 +1,201 @@ +################## +# +# This file is a drop-in to provide a helm provider. +# +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# +# All the following variables are just about configuring the Kubernetes provider +# to be able to modify EKS cluster. The reason there are so many options is +# because at various times, each one of them has had problems, so we give you a choice. +# +# The reason there are so many "enabled" inputs rather than automatically +# detecting whether or not they are enabled based on the value of the input +# is that any logic based on input values requires the values to be known during +# the "plan" phase of Terraform, and often they are not, which causes problems. +# +variable "kubeconfig_file_enabled" { + type = bool + default = false + description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false +} + +variable "kubeconfig_file" { + type = string + default = "" + description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false +} + +variable "kubeconfig_context" { + type = string + default = "" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false +} + +variable "kube_data_auth_enabled" { + type = bool + default = false + description = <<-EOT + If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. + EOT + nullable = false +} + +variable "kube_exec_auth_enabled" { + type = bool + default = true + description = <<-EOT + If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. 
+ EOT + nullable = false +} + +variable "kube_exec_auth_role_arn" { + type = string + default = "" + description = "The role ARN for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_role_arn_enabled" { + type = bool + default = true + description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false +} + +variable "kube_exec_auth_aws_profile" { + type = string + default = "" + description = "The AWS config profile for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_aws_profile_enabled" { + type = bool + default = false + description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false +} + +variable "kubeconfig_exec_auth_api_version" { + type = string + default = "client.authentication.k8s.io/v1beta1" + description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false +} + +variable "helm_manifest_experiment_enabled" { + type = bool + default = false + description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false +} + +locals { + kubeconfig_file_enabled = var.kubeconfig_file_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + + # Eventually we might try to get this from an environment variable + kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version + + exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? [ + "--profile", var.kube_exec_auth_aws_profile + ] : [] + + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) + exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ + "--role-arn", local.kube_exec_auth_role_arn + ] : [] + + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") +} + +data "aws_eks_cluster_auth" "eks" { + count = local.kube_data_auth_enabled ? 1 : 0 + name = local.eks_cluster_id +} + +provider "helm" { + kubernetes { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? 
one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } + } + experiments { + manifest = var.helm_manifest_experiment_enabled && module.this.enabled + } +} + +provider "kubernetes" { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } +} diff --git a/modules/eks/efs/providers.tf b/modules/eks/karpenter-node-pool/providers.tf similarity index 100% rename from modules/eks/efs/providers.tf rename to modules/eks/karpenter-node-pool/providers.tf diff --git a/modules/eks/karpenter-node-pool/remote-state.tf b/modules/eks/karpenter-node-pool/remote-state.tf new file mode 100644 index 000000000..ffca1d833 --- /dev/null +++ b/modules/eks/karpenter-node-pool/remote-state.tf @@ -0,0 +1,24 @@ +module "eks" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.eks_component_name + + defaults = { + eks_cluster_id = "deleted" + eks_cluster_arn = "deleted" + eks_cluster_identity_oidc_issuer = "deleted" + karpenter_node_role_arn = "deleted" + } + + context = module.this.context +} + +module "vpc" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "vpc" + + context = module.this.context +} diff --git a/modules/eks/karpenter-node-pool/variables.tf b/modules/eks/karpenter-node-pool/variables.tf new file mode 100644 index 000000000..522e79e77 --- /dev/null +++ b/modules/eks/karpenter-node-pool/variables.tf @@ -0,0 +1,132 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "eks_component_name" { + type = string + description = "The name of the eks component" + default = "eks/cluster" +} + +variable "node_pools" { + type = map(object({ + # The name of the Karpenter provisioner. The map key is used if this is not set. + name = optional(string) + # Whether to place EC2 instances launched by Karpenter into VPC private subnets. Set it to `false` to use public subnets. 
+ private_subnets_enabled = bool + # The Disruption spec controls how Karpenter scales down the node group. + # See the example (sadly not the specific `spec.disruption` documentation) at https://karpenter.sh/docs/concepts/nodepools/ for details + disruption = optional(object({ + # Describes which types of Nodes Karpenter should consider for consolidation. + # If using 'WhenUnderutilized', Karpenter will consider all nodes for consolidation and attempt to remove or + # replace Nodes when it discovers that the Node is underutilized and could be changed to reduce cost. + # If using `WhenEmpty`, Karpenter will only consider nodes for consolidation that contain no workload pods. + consolidation_policy = optional(string, "WhenUnderutilized") + + # The amount of time Karpenter should wait after discovering a consolidation decision (`go` duration string, s, m, or h). + # This value can currently (v0.36.0) only be set when the consolidationPolicy is 'WhenEmpty'. + # You can choose to disable consolidation entirely by setting the string value 'Never' here. + # Earlier versions of Karpenter called this field `ttl_seconds_after_empty`. + consolidate_after = optional(string) + + # The amount of time a Node can live on the cluster before being removed (`go` duration string, s, m, or h). + # You can choose to disable expiration entirely by setting the string value 'Never' here. + # This module sets a default of 336 hours (14 days), while the Karpenter default is 720 hours (30 days). + # Note that Karpenter calls this field "expiresAfter", and earlier versions called it `ttl_seconds_until_expired`, + # but we call it "max_instance_lifetime" to match the corresponding field in EC2 Auto Scaling Groups. + max_instance_lifetime = optional(string, "336h") + + # Budgets control the maximum number of NodeClaims owned by this NodePool that can be terminating at once. + # See https://karpenter.sh/docs/concepts/disruption/#disruption-budgets for details. + # A percentage is the percentage of the total number of active, ready nodes not being deleted, rounded up. + # If there are multiple active budgets, Karpenter uses the most restrictive value. + # If left undefined, this will default to one budget with a value of nodes: 10%. + # Note that budgets do not prevent or limit involuntary terminations. + # Example: + # On weekdays during business hours, don't do any deprovisioning. + # budgets = [{ + # schedule = "0 9 * * mon-fri" + # duration = "8h" + # nodes = "0" + # }] + budgets = optional(list(object({ + # The schedule specifies when a budget begins being active, using extended cronjob syntax. + # See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax for syntax details. + # Timezones are not supported. This field is required if Duration is set. + schedule = optional(string) + # Duration determines how long a Budget is active after each Scheduled start. + # If omitted, the budget is always active. This is required if Schedule is set. + # Must be a whole number of minutes and hours, as cron does not work in seconds, + # but since Go's `duration.String()` always adds a "0s" at the end, that is allowed. + duration = optional(string) + # The percentage or number of nodes that Karpenter can scale down during the budget.
+ nodes = string + })), []) + }), {}) + # Karpenter provisioner total CPU limit for all pods running on the EC2 instances launched by Karpenter + total_cpu_limit = string + # Karpenter provisioner total memory limit for all pods running on the EC2 instances launched by Karpenter + total_memory_limit = string + # Set a weight for this node pool. + # See https://karpenter.sh/docs/concepts/scheduling/#weighted-nodepools + weight = optional(number, 50) + labels = optional(map(string)) + annotations = optional(map(string)) + # Karpenter provisioner taints configuration. See https://aws.github.io/aws-eks-best-practices/karpenter/#create-provisioners-that-are-mutually-exclusive for more details + taints = optional(list(object({ + key = string + effect = string + value = optional(string) + }))) + startup_taints = optional(list(object({ + key = string + effect = string + value = optional(string) + }))) + # Karpenter node metadata options. See https://karpenter.sh/docs/concepts/nodeclasses/#specmetadataoptions for more details + metadata_options = optional(object({ + httpEndpoint = optional(string, "enabled") + httpProtocolIPv6 = optional(string, "disabled") + httpPutResponseHopLimit = optional(number, 2) + # httpTokens can be either "required" or "optional" + httpTokens = optional(string, "required") + }), {}) + # The AMI used by Karpenter provisioner when provisioning nodes. Based on the value set for amiFamily, Karpenter will automatically query for the appropriate EKS optimized AMI via AWS Systems Manager (SSM) + ami_family = string + # Karpenter nodes block device mappings. Controls the Elastic Block Storage volumes that Karpenter attaches to provisioned nodes. + # Karpenter uses default block device mappings for the AMI Family specified. + # For example, the Bottlerocket AMI Family defaults to two block device mappings, + # and normally you only want to scale `/dev/xvdb` where Containers and their storage are stored. + # Most other AMIs only have one device mapping at `/dev/xvda`. + # See https://karpenter.sh/docs/concepts/nodeclasses/#specblockdevicemappings for more details + block_device_mappings = list(object({ + deviceName = string + ebs = optional(object({ + volumeSize = string + volumeType = string + deleteOnTermination = optional(bool, true) + encrypted = optional(bool, true) + iops = optional(number) + kmsKeyID = optional(string, "alias/aws/ebs") + snapshotID = optional(string) + throughput = optional(number) + })) + })) + # Set acceptable (In) and unacceptable (Out) Kubernetes and Karpenter values for node provisioning based on Well-Known Labels and cloud-specific settings. These can include instance types, zones, computer architecture, and capacity type (such as AWS spot or on-demand). See https://karpenter.sh/v0.18.0/provisioner/#specrequirements for more details + requirements = list(object({ + key = string + operator = string + # Operators like "Exists" and "DoesNotExist" do not require a value + values = optional(list(string)) + })) + # Any values for spec.template.spec.kubelet allowed by Karpenter. + # Not fully specified, because they are subject to change. + # See: + # https://karpenter.sh/docs/concepts/nodepools/#spectemplatespeckubelet + # https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/ + kubelet = optional(any, {}) + })) + description = "Configuration for node pools. See code for details."
+ nullable = false +} diff --git a/modules/eks/karpenter-node-pool/versions.tf b/modules/eks/karpenter-node-pool/versions.tf new file mode 100644 index 000000000..b58e8e98f --- /dev/null +++ b/modules/eks/karpenter-node-pool/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.3.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.9.0" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.7.1, != 2.21.0" + } + } +} diff --git a/modules/eks/karpenter/CHANGELOG.md b/modules/eks/karpenter/CHANGELOG.md new file mode 100644 index 000000000..55e5d90f3 --- /dev/null +++ b/modules/eks/karpenter/CHANGELOG.md @@ -0,0 +1,144 @@ +## Release 1.470.0 + +Components PR [#1076](https://github.com/cloudposse/terraform-aws-components/pull/1076) + +#### Bugfix + +- Fixed issues with IAM Policy support for cleaning up `v1alpha` resources. + +With the previous release of this component, we encouraged users to delete their `v1alpha` Karpenter resources before +upgrading to `v1beta`. However, certain things, such as EC2 Instance Profiles, would not be deleted by Terraform because +they were created or modified by the Karpenter controller. + +To enable the `v1beta` Karpenter controller to clean up these resources, we added a second IAM Policy to the official +Karpenter IAM Policy document. This second policy allows the Karpenter controller to delete the `v1alpha` resources. +However, there were 2 problems with that. + +First, the policy was subtly incorrect, and did not, in fact, allow the Karpenter controller to delete all the +resources. This has been fixed. + +Second, a long EKS cluster name could cause the Karpenter IRSA's policy to exceed the maximum character limit for an IAM +Policy. This has also been fixed by making the `v1alpha` policy a separate managed policy attached to the Karpenter +controller's role, rather than merging the statements into the `v1beta` policy. This change also avoids potential +conflicts with policy SIDs. + +> [!NOTE] +> +> #### Innocuous Changes +> +> Terraform will show IAM Policy changes, including deletion of statements from the existing policy and creation of a +> new policy. This is expected and innocuous. The IAM Policy has been split into 2 to avoid exceeding length limits, but +> the current (`v1beta`) policy remains the same and the now separate (`v1alpha`) policy has been corrected. + +## Version 1.445.0 + +Components [PR #1039](https://github.com/cloudposse/terraform-aws-components/pull/1039) + +> [!WARNING] +> +> #### Major Breaking Changes +> +> Karpenter at version v0.33.0 transitioned from the `v1alpha` API to the `v1beta` API with many breaking changes. This +> component (`eks/karpenter`) changed as well, dropping support for the `v1alpha` API and adding support for the +> `v1beta` API. At the same time, the corresponding `eks/karpenter-provisioner` component was replaced with the +> `eks/karpenter-node-pool` component. The old components remain available under the +> [`deprecated/`](https://github.com/cloudposse/terraform-aws-components/tree/main/deprecated) directory. + +The full list of changes in Karpenter is too extensive to repeat here. See the +[Karpenter v1beta Migration Guide](https://karpenter.sh/v0.32/upgrading/v1beta1-migration/) and the +[Karpenter Upgrade Guide](https://karpenter.sh/docs/upgrading/upgrade-guide/) for details. 
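As a point of reference for the migration, the old `karpenter-provisioner` settings map onto the `node_pools` input of the new `eks/karpenter-node-pool` component (defined in its `variables.tf` above). A minimal, illustrative Terraform value is sketched below; the AMI family, limits, instance requirements, and volume sizes are examples only, not recommendations.

```hcl
node_pools = {
  default = {
    private_subnets_enabled = true
    ami_family              = "AL2" # example value; other Karpenter AMI families also work
    total_cpu_limit         = "100"
    total_memory_limit      = "400Gi"
    requirements = [
      { key = "karpenter.sh/capacity-type", operator = "In", values = ["on-demand", "spot"] },
      { key = "kubernetes.io/arch", operator = "In", values = ["amd64"] }
    ]
    block_device_mappings = [
      {
        deviceName = "/dev/xvda"
        ebs = {
          volumeSize = "100Gi"
          volumeType = "gp3"
        }
      }
    ]
  }
}
```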
+ +While a zero-downtime upgrade is possible, it is very complex and tedious and Cloud Posse does not support it at this +time. Instead, we recommend you delete your existing Karpenter Provisioner (`karpenter-provisioner`) and Controller +(`karpenter`) deployments, which will scale your cluster to zero and leave all your pods suspended, and then deploy the +new components, which will resume your pods. + +Full details of the recommended migration process for these components can be found in the +[Migration Guide](https://github.com/cloudposse/terraform-aws-components/blob/main/modules/eks/karpenter/docs/v1alpha-to-v1beta-migration.md). + +If you require a zero-downtime upgrade, please contact +[Cloud Posse professional services](https://cloudposse.com/services/) for assistance. + +## Version 1.348.0 + +Components PR [#868](https://github.com/cloudposse/terraform-aws-components/pull/868) + +The `karpenter-crd` helm chart can now be installed alongside the `karpenter` helm chart to automatically manage the +lifecycle of Karpenter CRDs. However since this chart must be installed before the `karpenter` helm chart, the +Kubernetes namespace must be available before either chart is deployed. Furthermore, this namespace should persist +whether or not the `karpenter-crd` chart is deployed, so it should not be installed with that given `helm-release` +resource. Therefore, we've moved namespace creation to a separate resource that runs before both charts. Terraform will +handle that namespace state migration with the `moved` block. + +There are several scenarios that may or may not require additional steps. Please review the following scenarios and +follow the steps for your given requirements. + +### Upgrading an existing `eks/karpenter` deployment without changes + +If you currently have `eks/karpenter` deployed to an EKS cluster and have upgraded to this version of the component, no +changes are required. `var.crd_chart_enabled` will default to `false`. + +### Upgrading an existing `eks/karpenter` deployment and deploying the `karpenter-crd` chart + +If you currently have `eks/karpenter` deployed to an EKS cluster, have upgraded to this version of the component, do not +currently have the `karpenter-crd` chart installed, and want to now deploy the `karpenter-crd` helm chart, a few +additional steps are required! + +First, set `var.crd_chart_enabled` to `true`. + +Next, update the installed Karpenter CRDs in order for Helm to automatically take over their management when the +`karpenter-crd` chart is deployed. We have included a script to run that upgrade. Run the `./karpenter-crd-upgrade` +script or run the following commands on the given cluster before deploying the chart. Please note that this script or +commands will only need to be run on first use of the CRD chart. + +Before running the script, ensure that the `kubectl` context is set to the cluster where the `karpenter` helm chart is +deployed. In Geodesic, you can usually do this with the `set-cluster` command, though your configuration may vary. 
+ +```bash +set-cluster -- terraform +``` + +Then run the script or commands: + +```bash +kubectl label crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh app.kubernetes.io/managed-by=Helm --overwrite +kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh meta.helm.sh/release-name=karpenter-crd --overwrite +kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh meta.helm.sh/release-namespace=karpenter --overwrite +``` + +> [!NOTE] +> +> Previously the `karpenter-crd-upgrade` script included deploying the `karpenter-crd` chart. Now that this chart is +> moved to Terraform, that helm deployment is no longer necessary. +> +> For reference, the `karpenter-crd` chart can be installed with helm with the following: +> +> ```bash +> helm upgrade --install karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version "$VERSION" --namespace karpenter +> ``` + +Now that the CRDs are upgraded, the component is ready to be applied. Apply the `eks/karpenter` component and then apply +`eks/karpenter-provisioner`. + +#### Note for upgrading Karpenter from before v0.27.3 to v0.27.3 or later + +If you are upgrading Karpenter from before v0.27.3 to v0.27.3 or later, you may need to run the following command to +remove an obsolete webhook: + +```bash +kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh +``` + +See [the Karpenter upgrade guide](https://karpenter.sh/v0.32/upgrading/upgrade-guide/#upgrading-to-v0273) for more +details. + +### Upgrading an existing `eks/karpenter` deployment where the `karpenter-crd` chart is already deployed + +If you currently have `eks/karpenter` deployed to an EKS cluster, have upgraded to this version of the component, and +already have the `karpenter-crd` chart installed, simply set `var.crd_chart_enabled` to `true` and redeploy Terraform to +have Terraform manage the helm release for `karpenter-crd`. + +### Net new deployments + +If you are initially deploying `eks/karpenter`, no changes are required, but we recommend installing the CRD chart. Set +`var.crd_chart_enabled` to `true` and continue with deployment. diff --git a/modules/eks/karpenter/README.md b/modules/eks/karpenter/README.md index 2cd9c3c91..4234e3cff 100644 --- a/modules/eks/karpenter/README.md +++ b/modules/eks/karpenter/README.md @@ -1,40 +1,42 @@ +--- +tags: + - component/eks/karpenter + - layer/eks + - provider/aws + - provider/helm +--- + # Component: `eks/karpenter` -This component provisions [Karpenter](https://karpenter.sh) on an EKS cluster. +This component provisions [Karpenter](https://karpenter.sh) on an EKS cluster. It requires at least version 0.32.0 of +Karpenter, though you are encouraged to use the latest version. ## Usage **Stack Level**: Regional -These instructions assume you are provisioning 2 EKS clusters in the same account -and region, named "blue" and "green", and alternating between them. -If you are only using a single cluster, you can ignore the "blue" and "green" -references and remove the `metadata` block from the `karpenter` module. +These instructions assume you are provisioning 2 EKS clusters in the same account and region, named "blue" and "green", +and alternating between them. If you are only using a single cluster, you can ignore the "blue" and "green" references +and remove the `metadata` block from the `karpenter` module. 
```yaml components: terraform: - # Base component of all `karpenter` components eks/karpenter: metadata: type: abstract - settings: - spacelift: - workspace_enabled: true vars: enabled: true - root_account_tenant_name: core - tags: - Team: sre - Service: karpenter - eks_component_name: eks/cluster + eks_component_name: "eks/cluster" name: "karpenter" + # https://github.com/aws/karpenter/tree/main/charts/karpenter + chart_repository: "oci://public.ecr.aws/karpenter" chart: "karpenter" - chart_repository: "https://charts.karpenter.sh" - chart_version: "v0.16.3" - create_namespace: true - kubernetes_namespace: "karpenter" + chart_version: "v0.36.0" + # Enable Karpenter to get advance notice of spot instances being terminated + # See https://karpenter.sh/docs/concepts/#interruption + interruption_handler_enabled: true resources: limits: cpu: "300m" @@ -46,39 +48,45 @@ components: atomic: true wait: true rbac_enabled: true - - # Provision `karpenter` component on the blue EKS cluster - eks/karpenter-blue: - metadata: - component: eks/karpenter - inherits: - - eks/karpenter - vars: - eks_component_name: eks/cluster-blue + # "karpenter-crd" can be installed as an independent helm chart to manage the lifecycle of Karpenter CRDs + crd_chart_enabled: true + crd_chart: "karpenter-crd" + # replicas set the number of Karpenter controller replicas to run + replicas: 2 + # "settings" controls a subset of the settings for the Karpenter controller regarding batch idle and max duration. + # you can read more about these settings here: https://karpenter.sh/docs/reference/settings/ + settings: + batch_idle_duration: "1s" + batch_max_duration: "10s" + # The logging settings for the Karpenter controller + logging: + enabled: true + level: + controller: "info" + global: "info" + webhook: "error" ``` ## Provision Karpenter on EKS cluster -Here we describe how to provision Karpenter on an EKS cluster. -We will be using the `plat-ue2-dev` stack as an example. +Here we describe how to provision Karpenter on an EKS cluster. We will be using the `plat-ue2-dev` stack as an example. ### Provision Service-Linked Roles for EC2 Spot and EC2 Spot Fleet -__Note:__ If you want to use EC2 Spot for the instances launched by Karpenter, -you may need to provision the following Service-Linked Role for EC2 Spot: +**Note:** If you want to use EC2 Spot for the instances launched by Karpenter, you may need to provision the following +Service-Linked Role for EC2 Spot: - Service-Linked Role for EC2 Spot -This is only necessary if this is the first time you're using EC2 Spot in the account. -Since this is a one-time operation, we recommend you do this manually via -the AWS CLI: +This is only necessary if this is the first time you're using EC2 Spot in the account. 
Since this is a one-time +operation, we recommend you do this manually via the AWS CLI: ```bash aws --profile --gbl--admin iam create-service-linked-role --aws-service-name spot.amazonaws.com ``` -Note that if the Service-Linked Roles already exist in the AWS account (if you used EC2 Spot or Spot Fleet before), -and you try to provision them again, you will see the following errors: +Note that if the Service-Linked Roles already exist in the AWS account (if you used EC2 Spot or Spot Fleet before), and +you try to provision them again, you will see the following errors: ```text An error occurred (InvalidInput) when calling the CreateServiceLinkedRole operation: @@ -86,15 +94,23 @@ Service role name AWSServiceRoleForEC2Spot has been taken in this account, pleas ``` For more details, see: - - https://karpenter.sh/v0.18.0/getting-started/getting-started-with-terraform/ - - https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html - - https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html + +- https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html +- https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html The process of provisioning Karpenter on an EKS cluster consists of 3 steps. -### 1. Provision EKS Fargate Profile for Karpenter and IAM Role for Nodes Launched by Karpenter +### 1. Provision EKS IAM Role for Nodes Launched by Karpenter + +> [!NOTE] +> +> #### VPC assumptions being made +> +> We assume you've already created a VPC using our [VPC component](/modules/vpc) and have private subnets already set +> up. The Karpenter node pools will be launched in the private subnets. -EKS Fargate Profile for Karpenter and IAM Role for Nodes launched by Karpenter are provisioned by the `eks/cluster` component: +EKS IAM Role for Nodes launched by Karpenter are provisioned by the `eks/cluster` component. (EKS can also provision a +Fargate Profile for Karpenter, but deploying Karpenter to Fargate is not recommended.): ```yaml components: @@ -105,50 +121,35 @@ components: inherits: - eks/cluster vars: - attributes: - - blue - eks_component_name: eks/cluster-blue - node_groups: - main: - instance_types: - - t3.medium - max_group_size: 3 - min_group_size: 1 - fargate_profiles: - karpenter: - kubernetes_namespace: karpenter - kubernetes_labels: null karpenter_iam_role_enabled: true ``` -__Notes__: - - Fargate Profile role ARNs need to be added to the `aws-auth` ConfigMap to allow the Fargate Profile nodes to join the EKS cluster (this is done by EKS) - - Karpenter IAM role ARN needs to be added to the `aws-auth` ConfigMap to allow the nodes launched by Karpenter to join the EKS cluster (this is done by the `eks/cluster` component) +> [!NOTE] +> +> The AWS Auth API for EKS is used to authorize the Karpenter controller to interact with the EKS cluster. -We use EKS Fargate Profile for Karpenter because It is recommended to run Karpenter on an EKS Fargate Profile. +Karpenter is installed using a Helm chart. The Helm chart installs the Karpenter controller and a webhook pod as a +Deployment that needs to run before the controller can be used for scaling your cluster. We recommend a minimum of one +small node group with at least one worker node. -```text -Karpenter is installed using a Helm chart. The Helm chart installs the Karpenter controller and -a webhook pod as a Deployment that needs to run before the controller can be used for scaling your cluster. 
-We recommend a minimum of one small node group with at least one worker node. - -As an alternative, you can run these pods on EKS Fargate by creating a Fargate profile for the -karpenter namespace. Doing so will cause all pods deployed into this namespace to run on EKS Fargate. -Do not run Karpenter on a node that is managed by Karpenter. -``` +As an alternative, you can run these pods on EKS Fargate by creating a Fargate profile for the karpenter namespace. +Doing so will cause all pods deployed into this namespace to run on EKS Fargate. Do not run Karpenter on a node that is +managed by Karpenter. -See [Run Karpenter Controller on EKS Fargate](https://aws.github.io/aws-eks-best-practices/karpenter/#run-the-karpenter-controller-on-eks-fargate-or-on-a-worker-node-that-belongs-to-a-node-group) +See +[Run Karpenter Controller...](https://aws.github.io/aws-eks-best-practices/karpenter/#run-the-karpenter-controller-on-eks-fargate-or-on-a-worker-node-that-belongs-to-a-node-group) for more details. We provision IAM Role for Nodes launched by Karpenter because they must run with an Instance Profile that grants permissions necessary to run containers and configure networking. -We define the IAM role for the Instance Profile in `components/terraform/eks/cluster/karpenter.tf`. +We define the IAM role for the Instance Profile in `components/terraform/eks/cluster/controller-policy.tf`. -Note that we provision the EC2 Instance Profile for the Karpenter IAM role in the `components/terraform/eks/karpenter` component (see the next step). +Note that we provision the EC2 Instance Profile for the Karpenter IAM role in the `components/terraform/eks/karpenter` +component (see the next step). -Run the following commands to provision the EKS Fargate Profile for Karpenter and the IAM role for instances launched by Karpenter -on the blue EKS cluster and add the role ARNs to the `aws-auth` ConfigMap: +Run the following commands to provision the EKS Instance Profile for Karpenter and the IAM role for instances launched +by Karpenter on the blue EKS cluster and add the role ARNs to the EKS Auth API: ```bash atmos terraform plan eks/cluster-blue -s plat-ue2-dev @@ -157,17 +158,29 @@ atmos terraform apply eks/cluster-blue -s plat-ue2-dev For more details, refer to: -- https://karpenter.sh/v0.18.0/getting-started/getting-started-with-terraform -- https://karpenter.sh/v0.18.0/getting-started/getting-started-with-eksctl - +- [Getting started with Terraform](https://aws-ia.github.io/terraform-aws-eks-blueprints/getting-started/) +- [Getting started with `eksctl`](https://karpenter.sh/docs/getting-started/getting-started-with-karpenter/) ### 2. 
Provision `karpenter` component In this step, we provision the `components/terraform/eks/karpenter` component, which deploys the following resources: - - EC2 Instance Profile for the nodes launched by Karpenter (note that the IAM role for the Instance Profile is provisioned in the previous step in the `eks/cluster` component) - - Karpenter Kubernetes controller using the Karpenter Helm Chart and the `helm_release` Terraform resource - - EKS IAM role for Kubernetes Service Account for the Karpenter controller (with all the required permissions) +- Karpenter CustomerResourceDefinitions (CRDs) using the Karpenter CRD Chart and the `helm_release` Terraform resource +- Karpenter Kubernetes controller using the Karpenter Helm Chart and the `helm_release` Terraform resource +- EKS IAM role for Kubernetes Service Account for the Karpenter controller (with all the required permissions) +- An SQS Queue and Event Bridge rules for handling Node Interruption events (i.e. Spot) + +Create a stack config for the blue Karpenter component in `stacks/catalog/eks/clusters/blue.yaml`: + +```yaml +eks/karpenter-blue: + metadata: + component: eks/karpenter + inherits: + - eks/karpenter + vars: + eks_component_name: eks/cluster-blue +``` Run the following commands to provision the Karpenter component on the blue EKS cluster: @@ -176,126 +189,162 @@ atmos terraform plan eks/karpenter-blue -s plat-ue2-dev atmos terraform apply eks/karpenter-blue -s plat-ue2-dev ``` -Note that the stack config for the blue Karpenter component is defined in `stacks/catalog/eks/clusters/blue.yaml`. +### 3. Provision `karpenter-node-pool` component + +In this step, we provision the `components/terraform/eks/karpenter-node-pool` component, which deploys Karpenter +[NodePools](https://karpenter.sh/v0.36/getting-started/getting-started-with-karpenter/#5-create-nodepool) using the +`kubernetes_manifest` resource. + +> [!TIP] +> +> #### Why use a separate component for NodePools? +> +> We create the NodePools as a separate component since the CRDs for the NodePools are created by the Karpenter +> component. This helps manage dependencies. + +First, create an abstract component for the `eks/karpenter-node-pool` component: ```yaml - eks/karpenter-blue: +components: + terraform: + eks/karpenter-node-pool: metadata: - component: eks/karpenter - inherits: - - eks/karpenter + type: abstract vars: - eks_component_name: eks/cluster-blue + enabled: true + # Disabling Manifest Experiment disables stored metadata with Terraform state + # Otherwise, the state will show changes on all plans + helm_manifest_experiment_enabled: false + node_pools: + default: + # Whether to place EC2 instances launched by Karpenter into VPC private subnets. Set it to `false` to use public subnets + private_subnets_enabled: true + # You can use disruption to set the maximum instance lifetime for the EC2 instances launched by Karpenter. + # You can also configure how fast or slow Karpenter should add/remove nodes. + # See more: https://karpenter.sh/v0.36/concepts/disruption/ + disruption: + max_instance_lifetime: "336h" # 14 days + # Taints can be used to prevent pods without the right tolerations from running on this node pool. 
+ # See more: https://karpenter.sh/v0.36/concepts/nodepools/#taints + taints: [] + total_cpu_limit: "1k" + # Karpenter node pool total memory limit for all pods running on the EC2 instances launched by Karpenter + total_memory_limit: "1200Gi" + # Set acceptable (In) and unacceptable (Out) Kubernetes and Karpenter values for node provisioning based on + # Well-Known Labels and cloud-specific settings. These can include instance types, zones, computer architecture, + # and capacity type (such as AWS spot or on-demand). + # See https://karpenter.sh/v0.36/concepts/nodepools/#spectemplatespecrequirements for more details + requirements: + - key: "karpenter.sh/capacity-type" + operator: "In" + # See https://karpenter.sh/docs/concepts/nodepools/#capacity-type + # Allow fallback to on-demand instances when spot instances are unavailable + # By default, Karpenter uses the "price-capacity-optimized" allocation strategy + # https://aws.amazon.com/blogs/compute/introducing-price-capacity-optimized-allocation-strategy-for-ec2-spot-instances/ + # It is currently not configurable, but that may change in the future. + # See https://github.com/aws/karpenter-provider-aws/issues/1240 + values: + - "on-demand" + - "spot" + - key: "kubernetes.io/os" + operator: "In" + values: + - "linux" + - key: "kubernetes.io/arch" + operator: "In" + values: + - "amd64" + # The following two requirements pick instances such as c3 or m5 + - key: karpenter.k8s.aws/instance-category + operator: In + values: ["c", "m", "r"] + - key: karpenter.k8s.aws/instance-generation + operator: Gt + values: ["2"] ``` -### 3. Provision `karpenter-provisioner` component - -In this step, we provision the `components/terraform/eks/karpenter-provisioner` component, which deploys Karpenter [Provisioners](https://karpenter.sh/v0.18.0/aws/provisioning) -using the `kubernetes_manifest` resource. +Now, create the stack config for the blue Karpenter NodePool component in `stacks/catalog/eks/clusters/blue.yaml`: -__NOTE:__ We deploy the provisioners in a separate step as a separate component since it uses `kind: Provisioner` CRD which itself is created by -the `karpenter` component in the previous step. +```yaml +eks/karpenter-node-pool/blue: + metadata: + component: eks/karpenter-node-pool + inherits: + - eks/karpenter-node-pool + vars: + eks_component_name: eks/cluster-blue +``` -Run the following commands to deploy the Karpenter provisioners on the blue EKS cluster: +Finally, run the following commands to deploy the Karpenter NodePools on the blue EKS cluster: ```bash -atmos terraform plan eks/karpenter-provisioner-blue -s plat-ue2-dev -atmos terraform apply eks/karpenter-provisioner-blue -s plat-ue2-dev +atmos terraform plan eks/karpenter-node-pool/blue -s plat-ue2-dev +atmos terraform apply eks/karpenter-node-pool/blue -s plat-ue2-dev ``` -Note that the stack config for the blue Karpenter provisioner component is defined in `stacks/catalog/eks/clusters/blue.yaml`. +## Node Interruption -```yaml - eks/karpenter-provisioner-blue: - metadata: - component: eks/karpenter-provisioner - inherits: - - eks/karpenter-provisioner - vars: - attributes: - - blue - eks_component_name: eks/cluster-blue -``` +Karpenter also supports listening for and responding to Node Interruption events. If interruption handling is enabled, +Karpenter will watch for upcoming involuntary interruption events that would cause disruption to your workloads. These +interruption events include: -You can override the default values from the `eks/karpenter-provisioner` base component. 
- -For your cluster, you will need to review the following configurations for the Karpenter provisioners and update it according to your requirements: - - - [requirements](https://karpenter.sh/v0.18.0/provisioner/#specrequirements): - - ```yaml - requirements: - - key: "karpenter.sh/capacity-type" - operator: "In" - values: - - "on-demand" - - "spot" - - key: "node.kubernetes.io/instance-type" - operator: "In" - values: - - "m5.xlarge" - - "m5.large" - - "m5.medium" - - "c5.xlarge" - - "c5.large" - - "c5.medium" - - key: "kubernetes.io/arch" - operator: "In" - values: - - "amd64" - ``` - - - `taints`, `startup_taints`, `ami_family` - - - Resource limits/requests for the Karpenter controller itself: - - ```yaml - resources: - limits: - cpu: "300m" - memory: "1Gi" - requests: - cpu: "100m" - memory: "512Mi" - ``` +- Spot Interruption Warnings +- Scheduled Change Health Events (Maintenance Events) +- Instance Terminating Events +- Instance Stopping Events - - Total CPU and memory limits for all pods running on the EC2 instances launched by Karpenter: +> [!TIP] +> +> #### Interruption Handler vs. Termination Handler +> +> The Node Interruption Handler is not the same as the Node Termination Handler. The latter is always enabled and +> cleanly shuts down the node in 2 minutes in response to a Node Termination event. The former gets advance notice that +> a node will soon be terminated, so it can have 5-10 minutes to shut down a node. - ```yaml - total_cpu_limit: "1k" - total_memory_limit: "1000Gi" - ``` +For more details, see refer to the [Karpenter docs](https://karpenter.sh/v0.32/concepts/disruption/#interruption) and +[FAQ](https://karpenter.sh/v0.32/faq/#interruption-handling) - - Config to terminate empty nodes after the specified number of seconds. This behavior can be disabled by setting the value to `null` (never scales down if not set): +To enable Node Interruption handling, set `var.interruption_handler_enabled` to `true`. This will create an SQS queue +and a set of Event Bridge rules to deliver interruption events to Karpenter. - ```yaml - ttl_seconds_after_empty: 30 - ``` +## Custom Resource Definition (CRD) Management - - Config to terminate nodes when a maximum age is reached. This behavior can be disabled by setting the value to `null` (never expires if not set): +Karpenter ships with a few Custom Resource Definitions (CRDs). In earlier versions of this component, when installing a +new version of the `karpenter` helm chart, CRDs were not be upgraded at the same time, requiring manual steps to upgrade +CRDs after deploying the latest chart. However Karpenter now supports an additional, independent helm chart for CRD +management. This helm chart, `karpenter-crd`, can be installed alongside the `karpenter` helm chart to automatically +manage the lifecycle of these CRDs. - ```yaml - ttl_seconds_until_expired: 2592000 - ``` +To deploy the `karpenter-crd` helm chart, set `var.crd_chart_enabled` to `true`. (Installing the `karpenter-crd` chart +is recommended. `var.crd_chart_enabled` defaults to `false` to preserve backward compatibility with older versions of +this component.) 
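Putting the two options above together, a sketch of the relevant component inputs is shown below as Terraform variable values (in an Atmos stack these would normally be set under `vars`). The values are illustrative; all of these variables and their defaults are listed in the Inputs table below.

```hcl
# Illustrative settings for this component (examples, not requirements).
crd_chart_enabled                    = true            # let Terraform manage the karpenter-crd chart
crd_chart                            = "karpenter-crd" # the default CRD chart name
interruption_handler_enabled         = true            # create the SQS queue and EventBridge rules
interruption_queue_message_retention = 300             # seconds; the default per the inputs table
```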
-For more details, refer to: +## Troubleshooting - - https://karpenter.sh/v0.18.0/provisioner/#specrequirements - - https://karpenter.sh/v0.18.0/aws/provisioning - - https://aws.github.io/aws-eks-best-practices/karpenter/#creating-provisioners - - https://aws.github.io/aws-eks-best-practices/karpenter - - https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html +For Karpenter issues, checkout the [Karpenter Troubleshooting Guide](https://karpenter.sh/docs/troubleshooting/) +### References +For more details on the CRDs, see: + +- https://karpenter.sh/v0.36/getting-started/getting-started-with-karpenter/#5-create-nodepool +- https://karpenter.sh/v0.36/concepts/disruption/#interruption +- https://karpenter.sh/v0.36/concepts/nodepools/#taints +- https://karpenter.sh/v0.36/concepts/nodepools/#spectemplatespecrequirements + +- https://karpenter.sh/v0.36/getting-started/getting-started-with-karpenter/ +- https://aws.github.io/aws-eks-best-practices/karpenter + + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [terraform](#requirement\_terraform) | >= 1.3.0 | | [aws](#requirement\_aws) | >= 4.9.0 | | [helm](#requirement\_helm) | >= 2.0 | -| [kubernetes](#requirement\_kubernetes) | >= 2.7.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.7.1, != 2.21.0 | ## Providers @@ -307,17 +356,25 @@ For more details, refer to: | Name | Source | Version | |------|--------|---------| -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.3.1 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | -| [karpenter](#module\_karpenter) | cloudposse/helm-release/aws | 0.7.0 | +| [karpenter](#module\_karpenter) | cloudposse/helm-release/aws | 0.10.1 | +| [karpenter\_crd](#module\_karpenter\_crd) | cloudposse/helm-release/aws | 0.10.1 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources | Name | Type | |------|------| -| [aws_iam_instance_profile.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource | +| [aws_cloudwatch_event_rule.interruption_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource | +| [aws_cloudwatch_event_target.interruption_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource | +| [aws_iam_policy.v1alpha](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_role_policy_attachment.v1alpha](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_sqs_queue.interruption_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue) | resource | +| [aws_sqs_queue_policy.interruption_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue_policy) | resource | | [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [aws_iam_policy_document.interruption_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) 
| data source | ## Inputs @@ -333,37 +390,41 @@ For more details, refer to: | [chart\_version](#input\_chart\_version) | Specify the exact chart version to install. If this is not specified, the latest version is installed | `string` | `null` | no | | [cleanup\_on\_fail](#input\_cleanup\_on\_fail) | Allow deletion of new resources created in this upgrade when upgrade fails | `bool` | `true` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | -| [create\_namespace](#input\_create\_namespace) | Create the namespace if it does not yet exist. Defaults to `false` | `bool` | `null` | no | +| [crd\_chart](#input\_crd\_chart) | The name of the Karpenter CRD chart to be installed, if `var.crd_chart_enabled` is set to `true`. | `string` | `"karpenter-crd"` | no | +| [crd\_chart\_enabled](#input\_crd\_chart\_enabled) | `karpenter-crd` can be installed as an independent helm chart to manage the lifecycle of Karpenter CRDs. Set to `true` to install this CRD helm chart before the primary karpenter chart. | `bool` | `false` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `true` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | +| [interruption\_handler\_enabled](#input\_interruption\_handler\_enabled) | If `true`, deploy a SQS queue and Event Bridge rules to enable interruption handling by Karpenter.
https://karpenter.sh/docs/concepts/disruption/#interruption | `bool` | `true` | no | +| [interruption\_queue\_message\_retention](#input\_interruption\_queue\_message\_retention) | The message retention in seconds for the interruption handler SQS queue. | `number` | `300` | no | | [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | | [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | | [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | | [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | -| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | | [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | | [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | | [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | -| [kubernetes\_namespace](#input\_kubernetes\_namespace) | The namespace to install the release into | `string` | n/a | yes | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [logging](#input\_logging) | A subset of the logging settings for the Karpenter controller |
object({
enabled = optional(bool, true)
level = optional(object({
controller = optional(string, "info")
global = optional(string, "info")
webhook = optional(string, "error")
}), {})
})
| `{}` | no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | | [rbac\_enabled](#input\_rbac\_enabled) | Enable/disable RBAC | `bool` | `true` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [replicas](#input\_replicas) | The number of Karpenter controller replicas to run | `number` | `2` | no | | [resources](#input\_resources) | The CPU and memory of the deployment's limits and requests |
object({
limits = object({
cpu = string
memory = string
})
requests = object({
cpu = string
memory = string
})
})
| n/a | yes | +| [settings](#input\_settings) | A subset of the settings for the Karpenter controller.
Some settings are implicitly set by this component, such as `clusterName` and
`interruptionQueue`. All settings can be overridden by providing a `settings`
section in the `chart_values` variable. The settings provided here are the ones
most likely to be set to non-default values, and are provided here for convenience. |
object({
batch_idle_duration = optional(string, "1s")
batch_max_duration = optional(string, "10s")
})
| `{}` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | @@ -374,28 +435,20 @@ For more details, refer to: | Name | Description | |------|-------------| -| [instance\_profile](#output\_instance\_profile) | Provisioned EC2 Instance Profile for nodes launched by Karpenter | | [metadata](#output\_metadata) | Block status of the deployed release | + -## References +## Related reading - https://karpenter.sh -- https://aws.github.io/aws-eks-best-practices/karpenter -- https://karpenter.sh/v0.18.0/getting-started/getting-started-with-terraform - https://aws.amazon.com/blogs/aws/introducing-karpenter-an-open-source-high-performance-kubernetes-cluster-autoscaler - https://github.com/aws/karpenter -- https://www.eksworkshop.com/beginner/085_scaling_karpenter - https://ec2spotworkshops.com/karpenter.html -- https://www.eksworkshop.com/beginner/085_scaling_karpenter/install_karpenter -- https://karpenter.sh/v0.18.0/development-guide -- https://karpenter.sh/v0.18.0/aws/provisioning +- https://www.eksworkshop.com/docs/autoscaling/compute/karpenter/ - https://docs.aws.amazon.com/eks/latest/userguide/pod-execution-role.html - https://aws.amazon.com/premiumsupport/knowledge-center/fargate-troubleshoot-profile-creation - https://learn.hashicorp.com/tutorials/terraform/kubernetes-crd-faas -- https://github.com/hashicorp/terraform-provider-kubernetes/issues/1545 -- https://issuemode.com/issues/hashicorp/terraform-provider-kubernetes-alpha/4840198 -- https://bytemeta.vip/repo/hashicorp/terraform-provider-kubernetes/issues/1442 - https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html [](https://cpco.io/component) diff --git a/modules/eks/karpenter/controller-policy-v1alpha.tf b/modules/eks/karpenter/controller-policy-v1alpha.tf new file mode 100644 index 000000000..d2c5f6b29 --- /dev/null +++ b/modules/eks/karpenter/controller-policy-v1alpha.tf @@ -0,0 +1,89 @@ +##### +# The primary and current (v1beta API) controller policy is in the controller-policy.tf file. +# +# However, if you have workloads that were deployed under the v1alpha API, you need to also +# apply this controller-policy-v1alpha.tf policy to the Karpenter controller to give it permission +# to manage (an in particular, delete) those workloads, and give it permission to manage the +# EC2 Instance Profile possibly created by the EKS cluster component. +# +# This policy is not needed for workloads deployed under the v1beta API with the +# EC2 Instance Profile created by the Karpenter controller. +# +# This allows it to terminate instances and delete launch templates that are tagged with the +# v1alpha API tag "karpenter.sh/provisioner-name" and to manage the EC2 Instance Profile +# created by the EKS cluster component. +# +# We create a separate policy and attach it separately to the Karpenter controller role +# because the main policy is near the 6,144 character limit for an IAM policy, and +# adding this to it can push it over. 
See: +# https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-quotas-entities +# + +locals { + controller_policy_v1alpha_json = <<-EndOfPolicy + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowScopedDeletionV1alpha", + "Effect": "Allow", + "Resource": [ + "arn:${local.aws_partition}:ec2:${var.region}:*:instance/*", + "arn:${local.aws_partition}:ec2:${var.region}:*:launch-template/*" + ], + "Action": [ + "ec2:TerminateInstances", + "ec2:DeleteLaunchTemplate" + ], + "Condition": { + "StringEquals": { + "ec2:ResourceTag/karpenter.k8s.aws/cluster": "${local.eks_cluster_id}" + }, + "StringLike": { + "ec2:ResourceTag/karpenter.sh/provisioner-name": "*" + } + } + }, + { + "Sid": "AllowScopedInstanceProfileActionsV1alpha", + "Effect": "Allow", + "Resource": "*", + "Action": [ + "iam:AddRoleToInstanceProfile", + "iam:RemoveRoleFromInstanceProfile", + "iam:DeleteInstanceProfile" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/kubernetes.io/cluster/${local.eks_cluster_id}": "owned", + "aws:ResourceTag/topology.kubernetes.io/region": "${var.region}" + }, + "ArnEquals": { + "ec2:InstanceProfile": "${replace(local.karpenter_node_role_arn, "role", "instance-profile")}" + } + } + } + ] + } + EndOfPolicy +} + +# We create a separate policy and attach it separately to the Karpenter controller role +# because the main policy is near the 6,144 character limit for an IAM policy, and +# adding this to it can push it over. See: +# https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-quotas-entities +resource "aws_iam_policy" "v1alpha" { + count = local.enabled ? 1 : 0 + + name = "${module.this.id}-v1alpha" + description = "Legacy Karpenter controller policy for v1alpha workloads" + policy = local.controller_policy_v1alpha_json + tags = module.this.tags +} + +resource "aws_iam_role_policy_attachment" "v1alpha" { + count = local.enabled ? 1 : 0 + + role = module.karpenter.service_account_role_name + policy_arn = one(aws_iam_policy.v1alpha[*].arn) +} diff --git a/modules/eks/karpenter/controller-policy.tf b/modules/eks/karpenter/controller-policy.tf new file mode 100644 index 000000000..f2b4924f2 --- /dev/null +++ b/modules/eks/karpenter/controller-policy.tf @@ -0,0 +1,298 @@ +# Unfortunately, Karpenter does not provide the Karpenter controller IAM policy in JSON directly: +# https://github.com/aws/karpenter/issues/2649 +# +# You can get it from the `data.aws_iam_policy_document.karpenter_controller` in +# https://github.com/terraform-aws-modules/terraform-aws-iam/blob/master/modules/iam-role-for-service-accounts-eks/policies.tf +# but that is not guaranteed to be up-to-date. +# +# Instead, we download the official source of truth, the CloudFormation template, and extract the IAM policy from it. +# +# The policy is not guaranteed to be stable from version to version. +# However, it seems stable enough, and we will leave for later the task of supporting multiple versions. 
+# +# To get the policy for a given Karpenter version >= 0.32.0, run: +# +# KARPENTER_VERSION= +# curl -O -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml +# +# Then open the downloaded cloudformation.yaml file and look for this resource (there may be other lines in between): +# +# KarpenterControllerPolicy: +# Type: AWS::IAM::ManagedPolicy +# Properties: +# PolicyDocument: !Sub | +# +# After which should be the IAM policy document in JSON format, with +# CloudFormation substitutions like +# +# "Resource": "arn:${local.aws_partition}:eks:${var.region}:${AWS::AccountId}:cluster/${local.eks_cluster_id}" +# +# NOTE: As a special case, the above multiple substitutions which create the ARN for the EKS cluster +# should be replaced with a single substitution, `${local.eks_cluster_arn}` to avoid needing to +# look up the account ID and because it is more robust. +# +# Review the existing HEREDOC below to find conditionals such as: +# %{if local.interruption_handler_enabled } +# and figure out how you want to re-incorporate them into the new policy, if needed. +# +# Paste the new policy into the HEREDOC below, then replace the CloudFormation substitutions with Terraform substitutions, +# e.g. ${var.region} -> ${var.region} +# +# and restore the conditionals. +# + +locals { + controller_policy_json = <<-EndOfPolicy + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowScopedEC2InstanceAccessActions", + "Effect": "Allow", + "Resource": [ + "arn:${local.aws_partition}:ec2:${var.region}::image/*", + "arn:${local.aws_partition}:ec2:${var.region}::snapshot/*", + "arn:${local.aws_partition}:ec2:${var.region}:*:security-group/*", + "arn:${local.aws_partition}:ec2:${var.region}:*:subnet/*" + ], + "Action": [ + "ec2:RunInstances", + "ec2:CreateFleet" + ] + }, + { + "Sid": "AllowScopedEC2LaunchTemplateAccessActions", + "Effect": "Allow", + "Resource": "arn:${local.aws_partition}:ec2:${var.region}:*:launch-template/*", + "Action": [ + "ec2:RunInstances", + "ec2:CreateFleet" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/kubernetes.io/cluster/${local.eks_cluster_id}": "owned" + }, + "StringLike": { + "aws:ResourceTag/karpenter.sh/nodepool": "*" + } + } + }, + { + "Sid": "AllowScopedEC2InstanceActionsWithTags", + "Effect": "Allow", + "Resource": [ + "arn:${local.aws_partition}:ec2:${var.region}:*:fleet/*", + "arn:${local.aws_partition}:ec2:${var.region}:*:instance/*", + "arn:${local.aws_partition}:ec2:${var.region}:*:volume/*", + "arn:${local.aws_partition}:ec2:${var.region}:*:network-interface/*", + "arn:${local.aws_partition}:ec2:${var.region}:*:launch-template/*", + "arn:${local.aws_partition}:ec2:${var.region}:*:spot-instances-request/*" + ], + "Action": [ + "ec2:RunInstances", + "ec2:CreateFleet", + "ec2:CreateLaunchTemplate" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/kubernetes.io/cluster/${local.eks_cluster_id}": "owned" + }, + "StringLike": { + "aws:RequestTag/karpenter.sh/nodepool": "*" + } + } + }, + { + "Sid": "AllowScopedResourceCreationTagging", + "Effect": "Allow", + "Resource": [ + "arn:${local.aws_partition}:ec2:${var.region}:*:fleet/*", + "arn:${local.aws_partition}:ec2:${var.region}:*:instance/*", + "arn:${local.aws_partition}:ec2:${var.region}:*:volume/*", + "arn:${local.aws_partition}:ec2:${var.region}:*:network-interface/*", + "arn:${local.aws_partition}:ec2:${var.region}:*:launch-template/*", + 
"arn:${local.aws_partition}:ec2:${var.region}:*:spot-instances-request/*" + ], + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/kubernetes.io/cluster/${local.eks_cluster_id}": "owned", + "ec2:CreateAction": [ + "RunInstances", + "CreateFleet", + "CreateLaunchTemplate" + ] + }, + "StringLike": { + "aws:RequestTag/karpenter.sh/nodepool": "*" + } + } + }, + { + "Sid": "AllowScopedResourceTagging", + "Effect": "Allow", + "Resource": "arn:${local.aws_partition}:ec2:${var.region}:*:instance/*", + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:ResourceTag/kubernetes.io/cluster/${local.eks_cluster_id}": "owned" + }, + "StringLike": { + "aws:ResourceTag/karpenter.sh/nodepool": "*" + }, + "ForAllValues:StringEquals": { + "aws:TagKeys": [ + "karpenter.sh/nodeclaim", + "Name" + ] + } + } + }, + { + "Sid": "AllowScopedDeletion", + "Effect": "Allow", + "Resource": [ + "arn:${local.aws_partition}:ec2:${var.region}:*:instance/*", + "arn:${local.aws_partition}:ec2:${var.region}:*:launch-template/*" + ], + "Action": [ + "ec2:TerminateInstances", + "ec2:DeleteLaunchTemplate" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/kubernetes.io/cluster/${local.eks_cluster_id}": "owned" + }, + "StringLike": { + "aws:ResourceTag/karpenter.sh/nodepool": "*" + } + } + }, + { + "Sid": "AllowRegionalReadActions", + "Effect": "Allow", + "Resource": "*", + "Action": [ + "ec2:DescribeAvailabilityZones", + "ec2:DescribeImages", + "ec2:DescribeInstances", + "ec2:DescribeInstanceTypeOfferings", + "ec2:DescribeInstanceTypes", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeSubnets" + ], + "Condition": { + "StringEquals": { + "aws:RequestedRegion": "${var.region}" + } + } + }, + { + "Sid": "AllowSSMReadActions", + "Effect": "Allow", + "Resource": "arn:${local.aws_partition}:ssm:${var.region}::parameter/aws/service/*", + "Action": "ssm:GetParameter" + }, + { + "Sid": "AllowPricingReadActions", + "Effect": "Allow", + "Resource": "*", + "Action": "pricing:GetProducts" + }, + %{if local.interruption_handler_enabled} + { + "Sid": "AllowInterruptionQueueActions", + "Effect": "Allow", + "Resource": "${local.interruption_handler_queue_arn}", + "Action": [ + "sqs:DeleteMessage", + "sqs:GetQueueUrl", + "sqs:ReceiveMessage" + ] + }, + %{endif} + { + "Sid": "AllowPassingInstanceRole", + "Effect": "Allow", + "Resource": "${local.karpenter_node_role_arn}", + "Action": "iam:PassRole", + "Condition": { + "StringEquals": { + "iam:PassedToService": "ec2.amazonaws.com" + } + } + }, + { + "Sid": "AllowScopedInstanceProfileCreationActions", + "Effect": "Allow", + "Resource": "*", + "Action": [ + "iam:CreateInstanceProfile" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/kubernetes.io/cluster/${local.eks_cluster_id}": "owned", + "aws:RequestTag/topology.kubernetes.io/region": "${var.region}" + }, + "StringLike": { + "aws:RequestTag/karpenter.k8s.aws/ec2nodeclass": "*" + } + } + }, + { + "Sid": "AllowScopedInstanceProfileTagActions", + "Effect": "Allow", + "Resource": "*", + "Action": [ + "iam:TagInstanceProfile" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/kubernetes.io/cluster/${local.eks_cluster_id}": "owned", + "aws:ResourceTag/topology.kubernetes.io/region": "${var.region}", + "aws:RequestTag/kubernetes.io/cluster/${local.eks_cluster_id}": "owned", + "aws:RequestTag/topology.kubernetes.io/region": "${var.region}" + }, + "StringLike": { + 
"aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass": "*", + "aws:RequestTag/karpenter.k8s.aws/ec2nodeclass": "*" + } + } + }, + { + "Sid": "AllowScopedInstanceProfileActions", + "Effect": "Allow", + "Resource": "*", + "Action": [ + "iam:AddRoleToInstanceProfile", + "iam:RemoveRoleFromInstanceProfile", + "iam:DeleteInstanceProfile" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/kubernetes.io/cluster/${local.eks_cluster_id}": "owned", + "aws:ResourceTag/topology.kubernetes.io/region": "${var.region}" + }, + "StringLike": { + "aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass": "*" + } + } + }, + { + "Sid": "AllowInstanceProfileReadActions", + "Effect": "Allow", + "Resource": "*", + "Action": "iam:GetInstanceProfile" + }, + { + "Sid": "AllowAPIServerEndpointDiscovery", + "Effect": "Allow", + "Resource": "${local.eks_cluster_arn}", + "Action": "eks:DescribeCluster" + } + ] + } + EndOfPolicy +} diff --git a/modules/eks/karpenter/docs/v1alpha-to-v1beta-migration.md b/modules/eks/karpenter/docs/v1alpha-to-v1beta-migration.md new file mode 100644 index 000000000..fb73b326a --- /dev/null +++ b/modules/eks/karpenter/docs/v1alpha-to-v1beta-migration.md @@ -0,0 +1,209 @@ +# Migration Guide + +## Prepare to Upgrade Karpenter API version + +Before you begin upgrading from Karpenter `v1alpha5` to `v1beta1` APIs, you should get your applications ready for the +changes and validate that your existing configuration has been applied to all your Karpenter instances. You may also +want to upgrade to the latest `v1alpha5` version of Karpenter (0.31.4 as of this writing) to ensure you haven't missed +any changes. + +### Validate your existing Karpenter deployments + +In order to preserve some kind of ability to rollback, you should validate your existing Karpenter deployments are in a +good state by planning them and either verifying they have no changes, or fixing them or deploying the changes. Then +freeze this configuration so that you can roll back to it if needed. + +### Make all your changes to related components + +Make all the changes to related components that are required to support the new version of Karpenter. This mainly +involves updating annotations and tolerations in your workloads to match the new Karpenter annotations and taints. Keep +the existing annotations and tolerations in place, so that your workloads will work with both versions. + +A lot of labels, tags, and annotations have changed in the new version of Karpenter. You should review the +[Karpenter v1beta1 migration guide](https://karpenter.sh/v0.32/upgrading/v1beta1-migration/) and roll out the changes to +your workloads before upgrading Karpenter. Where possible, you should roll out the changes in such a way that they work +with both the old and new versions of Karpenter. For example, instead of replacing the old annotations with the new +annotations, you should add the new annotations in addition to the old annotations, and remove the old annotations +later. 
+
+Here are some highlights of the changes, but you should review the full
+[Karpenter v1beta1 migration guide](https://karpenter.sh/v0.32/upgrading/v1beta1-migration/) for all the changes:
+
+- Annotations `karpenter.sh/do-not-consolidate` and `karpenter.sh/do-not-evict` have been replaced with
+  `karpenter.sh/do-not-disrupt: "true"`
+- Nodes spawned by the `v1beta1` resource will use the taint `karpenter.sh/disruption:NoSchedule=disrupting` instead of
+  `node.kubernetes.io/unschedulable`, so you may need to adjust pod tolerations
+- The following deprecated node labels have been removed in favor of their more modern equivalents. These need to be
+  changed in your workloads where they are used for topology constraints, affinities, etc., and they also need to be
+  changed in your NodePool (formerly Provisioner) requirements:
+  - `failure-domain.beta.kubernetes.io/zone` -> `topology.kubernetes.io/zone`
+  - `failure-domain.beta.kubernetes.io/region` -> `topology.kubernetes.io/region`
+  - `beta.kubernetes.io/arch` -> `kubernetes.io/arch`
+  - `beta.kubernetes.io/os` -> `kubernetes.io/os`
+  - `beta.kubernetes.io/instance-type` -> `node.kubernetes.io/instance-type`
+
+Deploy all these changes.
+
+### Deploy a managed node group, if you haven't already
+
+Karpenter now recommends deploying it into a managed node group rather than via Fargate. In part, this is because
+Karpenter also strongly recommends it be deployed to the `kube-system` namespace, and deploying the `kube-system`
+namespace to Fargate is inefficient at best. This component no longer supports deploying Karpenter to any namespace
+other than `kube-system`, so if you had been deploying it to Fargate, you probably want to provision a minimal managed
+node group to run the `kube-system` namespace, and it will also host Karpenter.
+
+## Migration, the Long Way
+
+It is possible to upgrade Karpenter step-by-step, but it is a long process. Here are the basic steps to get you to
+v0.36.0 (there may be more for later versions):
+
+- Upgrade to v0.31.4 (or later v0.31.x if available), fixing any upgrade issues
+- Upgrade to v0.32.9, moving Karpenter to the `kube-system` namespace, which will require some manual intervention when
+  applying the Helm chart
+- Deploy all new Karpenter `v1beta1` resources that mirror your `v1alpha5` resources, and make all the other changes
+  listed in the [v1beta1 migration guide](https://karpenter.sh/v0.32/upgrading/v1beta1-migration/), such as (not a
+  complete list):
+  - Annotations `karpenter.sh/do-not-consolidate` and `karpenter.sh/do-not-evict` have been replaced with
+    `karpenter.sh/do-not-disrupt: "true"`
+  - Karpenter-generated tag keys have changed, so you may need to adjust your IAM Policies if you are using
+    Attribute-Based Access Control.
+  - The `karpenter-global-settings` ConfigMap has been replaced with settings via Environment Variables and CLI flags
+  - Default log encoding changed from console to JSON, so if your log processing cannot handle JSON logs, you should
+    probably change your log processing rather than sticking with the deprecated console encoding
+  - Prometheus metrics are now served on port 8001. You may need to adjust scraper configurations, and you may need to
+    override this port setting if it would otherwise cause a conflict.
+- Delete all old Karpenter `v1alpha5` resources
+- Review the [Karpenter upgrade guide](https://karpenter.sh/docs/upgrading/upgrade-guide/) and make additional changes
+  to reflect your preferences regarding new features and changes in behavior, such as (not a
+  complete list):
+  - Availability of Node Pool Disruption Budgets
+  - Incompatibility with Ubuntu 22.04 EKS AMI
+  - Changes to names of Kubernetes labels Karpenter uses
+  - Changes to tags Karpenter uses
+  - Recommendation to move Karpenter from the `karpenter` namespace to `kube-system`
+  - Deciding whether you want drift detection enabled
+  - Changes to logging configuration
+  - Changes to how Selectors, e.g. for Subnets, are configured
+  - Karpenter now uses a podSecurityContext to configure the `fsgroup` for pod volumes (to `65536`), which can affect
+    sidecars
+- Upgrade to the latest version of Karpenter
+
+This multistep process is particularly difficult to organize and execute using Terraform and Helm because of the
+changing resource types and configuration required to support both `v1alpha5` and `v1beta1` resources at the same time.
+Therefore, this component does not support this path, and this document does not describe it in any greater detail.
+
+## Migration, the Shorter Way
+
+The shortest way is to delete all Karpenter resources, completely deleting the Cloud Posse `eks/karpenter` and
+`eks/karpenter-provisioner` components, and then upgrading the components to the latest version and redeploying them.
+
+The shorter (but not shortest) way is to abandon the old configuration and code in place, taking advantage of the fact
+that `eks/karpenter-provisioner` has been replaced with `eks/karpenter-node-pool`. That path is what the rest of this
+document describes.
+
+### Disable automatic deployments
+
+If you are using some kind of automatic deployment, such as Spacelift, disable it for the `karpenter` and
+`karpenter-provisioner` stacks. This is because we will roll out breaking changes, and want to sequence the operations
+manually. If using Spacelift, you can disable it by setting `workspace_enabled: false`, but remember, you must check in
+the changes and merge them to your default branch in order for them to take effect.
+
+### Copy existing configuration to new names
+
+The `eks/karpenter-provisioner` component has been replaced with the `eks/karpenter-node-pool` component. You should
+copy your existing `karpenter-provisioner` stacks to `karpenter-node-pool` stacks, adjusting the component name and
+adding it to the import list wherever `karpenter-provisioner` was imported.
+
+For the moment, we will leave the old `karpenter-provisioner` component and stacks in place.
+
+### Revise your copied `karpenter-node-pool` stacks
+
+Terminology has changed and some settings have been moved in the new version. See the
+[Karpenter v1beta1 Migration Guide](https://karpenter.sh/v0.32/upgrading/v1beta1-migration/) for details.
+
+For the most part you can just use the copied settings from the old version of this component directly in the new
+version, but there are some changes.
+
+As you have seen, "provisioner" has been renamed "node_pool". So you will need to make some changes to your new
+`karpenter-node-pool` stacks.
+
+Specifically, the `provisioner` input has been renamed `node_pools`. Within that input:
+
+- The `consolidation` input, which used to be a single boolean, has been replaced with the full `disruption` element of
+  the NodePool.
+- The old `ttl_seconds_after_empty` is now `disruption.consolidate_after`.
+- The old `ttl_seconds_until_expired` is now `disruption.max_instance_lifetime` to align with the EC2 Auto Scaling Group
+  terminology, although Karpenter calls it `expiresAfter`.
+- `spec.template.spec.kubelet` settings are not yet supported by this component.
+- `settings.aws.enablePodENI` and `settings.aws.enableENILimitedPodDensity`, which you may have previously set via
+  `chart_values`, have been dropped by Karpenter.
+- Many other chart values you may have been setting via `chart_values` have been moved. See the
+  [Karpenter v1beta1 Migration Guide](https://karpenter.sh/v0.32/upgrading/v1beta1-migration/#helm-values) for details.
+
+### Revise your `karpenter` stacks
+
+The `karpenter` stack probably requires only a few changes. In general, if you had been setting anything via
+`chart_values`, you probably should just delete those settings. If the component doesn't support the setting, it is
+likely that Karpenter no longer supports it, or the way it is configured via the chart has changed.
+
+For example, `AWS_ENI_LIMITED_POD_DENSITY` is no longer supported by Karpenter, and `replicas` is now a setting of the
+component, and does not need to be set via `chart_values`.
+
+- Update the chart version. Find the latest version by looking inside the
+  [Chart.yaml](https://github.com/aws/karpenter-provider-aws/blob/main/charts/karpenter/Chart.yaml) file in the
+  Karpenter Helm chart repository, on the main branch. Use the value set as `version` (not `appVersion`, if different)
+  in that file.
+
+- Karpenter is now always deployed to the `kube-system` namespace. Any Kubernetes namespace configuration inputs have
+  been removed. Remove these lines from your configuration:
+
+  ```yaml
+  create_namespace: true
+  kubernetes_namespace: "karpenter"
+  ```
+
+- The number of replicas can now be set via the `replicas` input. That said, there is little reason to change this from
+  the default of 2. Only one controller is active at a time, and the other one is a standby. There is no load sharing or
+  other reason to have more than 2 replicas in most cases.
+
+- The lifecycle settings `consolidation`, `ttl_seconds_after_empty` and `ttl_seconds_until_expired` have been moved to
+  the `disruption` input. Unfortunately, the documentation for the Karpenter Disruption spec is lacking, so read the
+  comments in the code for the `disruption` input for details. The short story is:
+
+  - `consolidation` is now enabled by default. To disable it, set `disruption.consolidate_after` to `"Never"`.
+  - If you previously set `ttl_seconds_after_empty`, move that setting to the `disruption.consolidate_after` attribute,
+    and set `disruption.consolidation_policy` to `"WhenEmpty"`.
+  - If you previously set `ttl_seconds_until_expired`, move that setting to the `disruption.max_instance_lifetime`
+    attribute. If you previously left it unset, you can keep the previous behavior by setting it to "Never". The new
+    default is to expire instances after 336 hours (14 days).
+  - The disruption setting can optionally take a list of `budget` settings. See the
+    [Disruption Budgets documentation](https://karpenter.sh/docs/concepts/disruption/#disruption-budgets) for details on
+    what this is. It is **not** the same as a Pod disruption budget, which tries to put limits on the number of
+    instances of a pod that are running at once. Instead, it is a limitation on how quickly Karpenter will remove
+    instances.
+
+- The [interruption handler](https://karpenter.sh/docs/concepts/disruption/#interruption) is now enabled by default.
If + you had disabled it, you may want to reconsider. It is a key feature of Karpenter that allows it to automatically + handle interruptions and reschedule pods on other nodes gracefully given the advance notice provided by AWS of + involuntary interruption events. + +- The `legacy_create_karpenter_instance_profile` has been removed. Previously, this component would create an instance + profile for the Karpenter nodes. This flag disabled that behavior in favor of having the EKS cluster create the + instance profile, because the Terraform code could not handle certain edge cases. Now Karpenter itself creates the + instance profile and handles the edge cases, so the flag is no longer needed. + + As a side note: if you are using the `eks/cluster` component, you can remove any + `legacy_do_not_create_karpenter_instance_profile` configuration from it after finishing the migration to the new + Karpenter APIs. + +- Logging configuration has changed. The component has a single `logging` input object that defaults to enabled at the + "info" level for the controller. If you were configuring logging via `chart_values`, we recommend you remove that + configuration and use the new input object. However, if the input object is not sufficient for your needs, you can use + new chart values to configure the logging level and format, but be aware the new chart inputs controlling logging are + significantly different from the old ones. + +- You may want to take advantage of the new `batch_idle_duration` and `batch_max_duration` settings, set as attributes + of the `settings` input. These settings allow you to control how long Karpenter waits for more pods to be deployed + before launching a new instance. This is useful if you have many pods to deploy in response to a single event, such as + when launching multiple CI jobs to handle a new release. Karpenter can then launch a single instance to handle them + all, rather than launching a new instance for each pod. See the + [batching parameters](https://karpenter.sh/docs/reference/settings/#batching-parameters) documentation for details. diff --git a/modules/eks/karpenter/interruption_handler.tf b/modules/eks/karpenter/interruption_handler.tf new file mode 100644 index 000000000..56a3334f7 --- /dev/null +++ b/modules/eks/karpenter/interruption_handler.tf @@ -0,0 +1,99 @@ +# These event definitions, queue policies, and SQS queue definition +# come from the Karpenter CloudFormation template. +# See comments in `controller-policy.tf` for more information. 
+ +locals { + interruption_handler_enabled = local.enabled && var.interruption_handler_enabled + interruption_handler_queue_name = module.this.id + interruption_handler_queue_arn = one(aws_sqs_queue.interruption_handler[*].arn) + + dns_suffix = join("", data.aws_partition.current[*].dns_suffix) + + events = { + health_event = { + name = "HealthEvent" + description = "Karpenter interrupt - AWS health event" + event_pattern = { + source = ["aws.health"] + detail-type = ["AWS Health Event"] + } + } + spot_interupt = { + name = "SpotInterrupt" + description = "Karpenter interrupt - EC2 spot instance interruption warning" + event_pattern = { + source = ["aws.ec2"] + detail-type = ["EC2 Spot Instance Interruption Warning"] + } + } + instance_rebalance = { + name = "InstanceRebalance" + description = "Karpenter interrupt - EC2 instance rebalance recommendation" + event_pattern = { + source = ["aws.ec2"] + detail-type = ["EC2 Instance Rebalance Recommendation"] + } + } + instance_state_change = { + name = "InstanceStateChange" + description = "Karpenter interrupt - EC2 instance state-change notification" + event_pattern = { + source = ["aws.ec2"] + detail-type = ["EC2 Instance State-change Notification"] + } + } + } +} + +resource "aws_sqs_queue" "interruption_handler" { + count = local.interruption_handler_enabled ? 1 : 0 + + name = local.interruption_handler_queue_name + message_retention_seconds = var.interruption_queue_message_retention + sqs_managed_sse_enabled = true + + tags = module.this.tags +} + +data "aws_iam_policy_document" "interruption_handler" { + count = local.interruption_handler_enabled ? 1 : 0 + + statement { + sid = "SqsWrite" + actions = ["sqs:SendMessage"] + resources = [aws_sqs_queue.interruption_handler[0].arn] + + principals { + type = "Service" + identifiers = [ + "events.${local.dns_suffix}", + "sqs.${local.dns_suffix}", + ] + } + } +} + +resource "aws_sqs_queue_policy" "interruption_handler" { + count = local.interruption_handler_enabled ? 1 : 0 + + queue_url = aws_sqs_queue.interruption_handler[0].url + policy = data.aws_iam_policy_document.interruption_handler[0].json +} + +resource "aws_cloudwatch_event_rule" "interruption_handler" { + for_each = { for k, v in local.events : k => v if local.interruption_handler_enabled } + + name = "${module.this.id}-${each.value.name}" + description = each.value.description + event_pattern = jsonencode(each.value.event_pattern) + + tags = module.this.tags +} + +resource "aws_cloudwatch_event_target" "interruption_handler" { + for_each = { for k, v in local.events : k => v if local.interruption_handler_enabled } + + rule = aws_cloudwatch_event_rule.interruption_handler[each.key].name + target_id = "KarpenterInterruptionQueueTarget" + arn = aws_sqs_queue.interruption_handler[0].arn +} diff --git a/modules/eks/karpenter/karpenter-crd-upgrade b/modules/eks/karpenter/karpenter-crd-upgrade new file mode 100755 index 000000000..a3e3ce05c --- /dev/null +++ b/modules/eks/karpenter/karpenter-crd-upgrade @@ -0,0 +1,28 @@ +#!/bin/bash + +function usage() { + cat >&2 <<'EOF' +./karpenter-crd-upgrade + +Use this script to upgrade the Karpenter CRDs by installing or upgrading the karpenter-crd helm chart. 
+ +EOF +} + +function upgrade() { + VERSION="${1}" + [[ $VERSION =~ ^v ]] || VERSION="v${VERSION}" + + set -x + + kubectl label crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh app.kubernetes.io/managed-by=Helm --overwrite + kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh meta.helm.sh/release-name=karpenter-crd --overwrite + kubectl annotate crd awsnodetemplates.karpenter.k8s.aws provisioners.karpenter.sh meta.helm.sh/release-namespace=karpenter --overwrite + helm upgrade --install karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version "$VERSION" --namespace karpenter +} + +if (($# == 0)); then + usage +else + upgrade $1 +fi diff --git a/modules/eks/karpenter/main.tf b/modules/eks/karpenter/main.tf index f7f47e844..3d930b117 100644 --- a/modules/eks/karpenter/main.tf +++ b/modules/eks/karpenter/main.tf @@ -1,33 +1,60 @@ # https://aws.amazon.com/blogs/aws/introducing-karpenter-an-open-source-high-performance-kubernetes-cluster-autoscaler/ # https://karpenter.sh/ -# https://karpenter.sh/v0.10.1/getting-started/getting-started-with-terraform/ -# https://karpenter.sh/v0.10.1/getting-started/getting-started-with-eksctl/ -# https://www.eksworkshop.com/beginner/085_scaling_karpenter/ -# https://karpenter.sh/v0.10.1/aws/provisioning/ -# https://www.eksworkshop.com/beginner/085_scaling_karpenter/setup_the_environment/ -# https://ec2spotworkshops.com/karpenter.html -# https://catalog.us-east-1.prod.workshops.aws/workshops/76a5dd80-3249-4101-8726-9be3eeee09b2/en-US/autoscaling/karpenter locals { enabled = module.this.enabled - eks_cluster_identity_oidc_issuer = try(module.eks.outputs.eks_cluster_identity_oidc_issuer, "") - karpenter_iam_role_name = try(module.eks.outputs.karpenter_iam_role_name, "") - karpenter_role_enabled = local.enabled && length(local.karpenter_iam_role_name) > 0 + # We need aws_partition to be non-null even when this module is disabled, because it is used in a string template + aws_partition = coalesce(one(data.aws_partition.current[*].partition), "aws") + + # eks_cluster_id is defined in provider-helm.tf + # eks_cluster_id = module.eks.outputs.eks_cluster_id + eks_cluster_arn = module.eks.outputs.eks_cluster_arn + eks_cluster_identity_oidc_issuer = module.eks.outputs.eks_cluster_identity_oidc_issuer + + karpenter_node_role_arn = module.eks.outputs.karpenter_iam_role_arn + + # Prior to Karpenter v0.32.0 (the v1Alpha APIs), Karpenter recommended using a dedicated namespace for Karpenter resources. + # Starting with Karpenter v0.32.0, Karpenter recommends installing Karpenter resources in the kube-system namespace. + # https://karpenter.sh/docs/getting-started/getting-started-with-karpenter/#preventing-apiserver-request-throttling + kubernetes_namespace = "kube-system" } -resource "aws_iam_instance_profile" "default" { - count = local.karpenter_role_enabled ? 1 : 0 +data "aws_partition" "current" { + count = local.enabled ? 
1 : 0 +} + + +# Deploy karpenter-crd helm chart +# "karpenter-crd" can be installed as an independent helm chart to manage the lifecycle of Karpenter CRDs +module "karpenter_crd" { + enabled = local.enabled && var.crd_chart_enabled + + source = "cloudposse/helm-release/aws" + version = "0.10.1" + + name = var.crd_chart + chart = var.crd_chart + repository = var.chart_repository + description = var.chart_description + chart_version = var.chart_version + wait = var.wait + atomic = var.atomic + cleanup_on_fail = var.cleanup_on_fail + timeout = var.timeout + + create_namespace_with_kubernetes = false # Namespace is created by EKS/Kubernetes by default + kubernetes_namespace = local.kubernetes_namespace - name = local.karpenter_iam_role_name - role = local.karpenter_iam_role_name - tags = module.this.tags + eks_cluster_oidc_issuer_url = coalesce(replace(local.eks_cluster_identity_oidc_issuer, "https://", ""), "deleted") + + context = module.this.context } # Deploy Karpenter helm chart module "karpenter" { source = "cloudposse/helm-release/aws" - version = "0.7.0" + version = "0.10.1" chart = var.chart repository = var.chart_repository @@ -38,88 +65,63 @@ module "karpenter" { cleanup_on_fail = var.cleanup_on_fail timeout = var.timeout - create_namespace_with_kubernetes = var.create_namespace - kubernetes_namespace = var.kubernetes_namespace - kubernetes_namespace_labels = merge(module.this.tags, { name = var.kubernetes_namespace }) + create_namespace_with_kubernetes = false # Namespace is created with kubernetes_namespace resources to be shared between charts + kubernetes_namespace = local.kubernetes_namespace eks_cluster_oidc_issuer_url = coalesce(replace(local.eks_cluster_identity_oidc_issuer, "https://", ""), "deleted") service_account_name = module.this.name - service_account_namespace = var.kubernetes_namespace - - iam_role_enabled = local.karpenter_role_enabled - - # https://karpenter.sh/v0.6.1/getting-started/cloudformation.yaml - # https://karpenter.sh/v0.10.1/getting-started/getting-started-with-terraform - # https://github.com/aws/karpenter/issues/2649 - # Apparently the source of truth for the best IAM policy is the `data.aws_iam_policy_document.karpenter_controller` in - # https://github.com/terraform-aws-modules/terraform-aws-iam/blob/master/modules/iam-role-for-service-accounts-eks/policies.tf - iam_policy_statements = [ - { - sid = "KarpenterController" - effect = "Allow" - resources = ["*"] - - actions = [ - # https://github.com/terraform-aws-modules/terraform-aws-iam/blob/99c69ad54d985f67acf211885aa214a3a6cc931c/modules/iam-role-for-service-accounts-eks/policies.tf#L511-L581 - # The reference policy is broken up into multiple statements with different resource restrictions based on tags. - # This list has breaks where statements are separated in the reference policy for easier comparison and maintenance. 
- "ec2:CreateLaunchTemplate", - "ec2:CreateFleet", - "ec2:CreateTags", - "ec2:DescribeLaunchTemplates", - "ec2:DescribeImages", - "ec2:DescribeInstances", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstanceTypeOfferings", - "ec2:DescribeAvailabilityZones", - "ec2:DescribeSpotPriceHistory", - "pricing:GetProducts", - - "ec2:TerminateInstances", - "ec2:DeleteLaunchTemplate", - - "ec2:RunInstances", - - "iam:PassRole", - ] - }, - { - sid = "KarpenterControllerSSM" - effect = "Allow" - # Allow Karpenter to read AMI IDs from SSM - actions = ["ssm:GetParameter"] - resources = ["arn:aws:ssm:*:*:parameter/aws/service/*"] - } - ] + service_account_namespace = local.kubernetes_namespace + + # Defaults to true, but set it here so it can be disabled when switching to Pod Identities + service_account_role_arn_annotation_enabled = true + + iam_role_enabled = true + iam_source_policy_documents = [local.controller_policy_json] values = compact([ - # standard k8s object settings yamlencode({ fullnameOverride = module.this.name serviceAccount = { name = module.this.name } - resources = var.resources - rbac = { - create = var.rbac_enabled + controller = { + resources = var.resources } + replicas = var.replicas }), - # karpenter-specific values + # karpenter-specific values yamlencode({ - aws = { - defaultInstanceProfile = one(aws_iam_instance_profile.default[*].name) + logConfig = { + enabled = var.logging.enabled + logLevel = { + controller = var.logging.level.controller + global = var.logging.level.global + webhook = var.logging.level.webhook + } } - clusterName = local.eks_cluster_id - clusterEndpoint = local.eks_cluster_endpoint - }), + settings = merge({ + batchIdleDuration = var.settings.batch_idle_duration + batchMaxDuration = var.settings.batch_max_duration + clusterName = local.eks_cluster_id + }, + local.interruption_handler_enabled ? 
{ + interruptionQueue = local.interruption_handler_queue_name + } : {} + ) + } + ), # additional values yamlencode(var.chart_values) ]) context = module.this.context - depends_on = [aws_iam_instance_profile.default] + depends_on = [ + module.karpenter_crd, + aws_cloudwatch_event_rule.interruption_handler, + aws_cloudwatch_event_target.interruption_handler, + aws_sqs_queue.interruption_handler, + aws_sqs_queue_policy.interruption_handler, + ] } diff --git a/modules/eks/karpenter/outputs.tf b/modules/eks/karpenter/outputs.tf index 830bd12aa..ac2640c71 100644 --- a/modules/eks/karpenter/outputs.tf +++ b/modules/eks/karpenter/outputs.tf @@ -2,8 +2,3 @@ output "metadata" { value = module.karpenter.metadata description = "Block status of the deployed release" } - -output "instance_profile" { - value = aws_iam_instance_profile.default - description = "Provisioned EC2 Instance Profile for nodes launched by Karpenter" -} diff --git a/modules/eks/karpenter/provider-helm.tf b/modules/eks/karpenter/provider-helm.tf index 9bb5edb6f..91cc7f6d4 100644 --- a/modules/eks/karpenter/provider-helm.tf +++ b/modules/eks/karpenter/provider-helm.tf @@ -21,18 +21,35 @@ variable "kubeconfig_file_enabled" { type = bool default = false description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false } variable "kubeconfig_file" { type = string default = "" description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false } variable "kubeconfig_context" { type = string default = "" - description = "Context to choose from the Kubernetes kube config file" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false } variable "kube_data_auth_enabled" { @@ -42,6 +59,7 @@ variable "kube_data_auth_enabled" { If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_enabled" { @@ -51,48 +69,62 @@ variable "kube_exec_auth_enabled" { If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. 
EOT + nullable = false } variable "kube_exec_auth_role_arn" { type = string default = "" description = "The role ARN for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_role_arn_enabled" { type = bool default = true description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false } variable "kube_exec_auth_aws_profile" { type = string default = "" description = "The AWS config profile for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_aws_profile_enabled" { type = bool default = false description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false } variable "kubeconfig_exec_auth_api_version" { type = string default = "client.authentication.k8s.io/v1beta1" description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false } variable "helm_manifest_experiment_enabled" { type = bool - default = true + default = false description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false } locals { kubeconfig_file_enabled = var.kubeconfig_file_enabled - kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled - kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled # Eventually we might try to get this from an environment variable kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version @@ -101,16 +133,17 @@ locals { "--profile", var.kube_exec_auth_aws_profile ] : [] - kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, var.import_role_arn, module.iam_roles.terraform_role_arn) + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ "--role-arn", local.kube_exec_auth_role_arn ] : [] # Provide dummy configuration for the case where the EKS cluster is not available. - certificate_authority_data = try(module.eks.outputs.eks_cluster_certificate_authority_data, "") + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") - eks_cluster_endpoint = try(module.eks.outputs.eks_cluster_endpoint, "") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? 
null : try(module.eks.outputs.eks_cluster_endpoint, "") } data "aws_eks_cluster_auth" "eks" { @@ -121,15 +154,16 @@ data "aws_eks_cluster_auth" "eks" { provider "helm" { kubernetes { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) + cluster_ca_certificate = local.cluster_ca_certificate token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" @@ -146,15 +180,16 @@ provider "helm" { provider "kubernetes" { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) + cluster_ca_certificate = local.cluster_ca_certificate token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" diff --git a/modules/eks/karpenter/providers.tf b/modules/eks/karpenter/providers.tf index c2419aabb..89ed50a98 100644 --- a/modules/eks/karpenter/providers.tf +++ b/modules/eks/karpenter/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/eks/karpenter/remote-state.tf b/modules/eks/karpenter/remote-state.tf index 90c6ab1a8..723da0a44 100644 --- a/modules/eks/karpenter/remote-state.tf +++ b/modules/eks/karpenter/remote-state.tf @@ -1,8 +1,16 @@ module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" + version = "1.5.0" component = var.eks_component_name context = module.this.context + + # Attempt to allow this component to be deleted from Terraform state even after the EKS cluster has been deleted + defaults = { + eks_cluster_id = "deleted" + eks_cluster_arn = "deleted" + eks_cluster_identity_oidc_issuer = "deleted" + karpenter_node_role_arn = "deleted" + } } diff --git a/modules/eks/karpenter/variables.tf b/modules/eks/karpenter/variables.tf index 6aaa6b4fb..0c1117fa0 100644 --- a/modules/eks/karpenter/variables.tf +++ b/modules/eks/karpenter/variables.tf @@ -25,6 +25,18 @@ variable "chart_version" { default = null } +variable "crd_chart_enabled" { + type = bool + description = "`karpenter-crd` can be installed as an independent helm chart to manage the lifecycle of Karpenter CRDs. Set to `true` to install this CRD helm chart before the primary karpenter chart." + default = false +} + +variable "crd_chart" { + type = string + description = "The name of the Karpenter CRD chart to be installed, if `var.crd_chart_enabled` is set to `true`." + default = "karpenter-crd" +} + variable "resources" { type = object({ limits = object({ @@ -39,17 +51,6 @@ variable "resources" { description = "The CPU and memory of the deployment's limits and requests" } -variable "create_namespace" { - type = bool - description = "Create the namespace if it does not yet exist. Defaults to `false`" - default = null -} - -variable "kubernetes_namespace" { - type = string - description = "The namespace to install the release into" -} - variable "timeout" { type = number description = "Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). 
Defaults to `300` seconds" @@ -91,3 +92,54 @@ variable "eks_component_name" { description = "The name of the eks component" default = "eks/cluster" } + +variable "interruption_handler_enabled" { + type = bool + default = true + description = < + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [helm](#requirement\_helm) | >= 2.6.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.9.0, != 2.21.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [keda](#module\_keda) | cloudposse/helm-release/aws | 0.10.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [atomic](#input\_atomic) | If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used. | `bool` | `true` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [chart](#input\_chart) | Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended. | `string` | `"keda"` | no | +| [chart\_version](#input\_chart\_version) | Specify the exact chart version to install. If this is not specified, the latest version is installed. | `string` | `"2.8"` | no | +| [cleanup\_on\_fail](#input\_cleanup\_on\_fail) | Allow deletion of new resources created in this upgrade when upgrade fails. | `bool` | `true` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [create\_namespace](#input\_create\_namespace) | Create the Kubernetes namespace if it does not yet exist | `bool` | `true` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [description](#input\_description) | Set release description attribute (visible in the history). | `string` | `"Used for autoscaling from external metrics configured as triggers."` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can be seen in the plan | `bool` | `false` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).<br>
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.<br>
Does not affect `id_full`. | `number` | `null` | no | +| [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | +| [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | +| [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | +| [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | +| [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | +| [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | +| [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | +| [kubernetes\_namespace](#input\_kubernetes\_namespace) | The namespace to install the release into. | `string` | n/a | yes | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [rbac\_enabled](#input\_rbac\_enabled) | Service Account for pods. | `bool` | `true` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [repository](#input\_repository) | Repository URL where to locate the requested chart. | `string` | `"https://kedacore.github.io/charts"` | no | +| [resources](#input\_resources) | A sub-nested map of deployment to resources. e.g. { operator = { requests = { cpu = 100m, memory = 100Mi }, limits = { cpu = 200m, memory = 200Mi } } } | `any` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [timeout](#input\_timeout) | Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds | `number` | `null` | no | +| [wait](#input\_wait) | Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`. | `bool` | `true` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [metadata](#output\_metadata) | Block status of the deployed release. | +| [service\_account\_name](#output\_service\_account\_name) | Kubernetes Service Account name | +| [service\_account\_namespace](#output\_service\_account\_namespace) | Kubernetes Service Account namespace | +| [service\_account\_policy\_arn](#output\_service\_account\_policy\_arn) | IAM policy ARN | +| [service\_account\_policy\_id](#output\_service\_account\_policy\_id) | IAM policy ID | +| [service\_account\_policy\_name](#output\_service\_account\_policy\_name) | IAM policy name | +| [service\_account\_role\_arn](#output\_service\_account\_role\_arn) | IAM role ARN | +| [service\_account\_role\_name](#output\_service\_account\_role\_name) | IAM role name | +| [service\_account\_role\_unique\_id](#output\_service\_account\_role\_unique\_id) | IAM role unique ID | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/eks/keda) - + Cloud Posse's upstream component diff --git a/modules/eks/keda/context.tf b/modules/eks/keda/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/eks/keda/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
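+#
+# A minimal sketch of the intended pattern (illustrative only, not part of this file):
+#
+#   module "some_child" {
+#     source  = "cloudposse/label/null"
+#     version = "0.25.0"
+#     context = module.this.context  # raw inputs, nulls preserved
+#   }
+#
+#   # ...whereas module.this.delimiter is already resolved (e.g. "-") and safe to interpolate.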
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/eks/keda/main.tf b/modules/eks/keda/main.tf new file mode 100644 index 000000000..fad7fe50d --- /dev/null +++ b/modules/eks/keda/main.tf @@ -0,0 +1,48 @@ +module "keda" { + source = "cloudposse/helm-release/aws" + version = "0.10.0" + + name = module.this.name + description = var.description + + repository = var.repository + chart = var.chart + chart_version = var.chart_version + wait = var.wait + atomic = var.atomic + cleanup_on_fail = var.cleanup_on_fail + timeout = var.timeout + + eks_cluster_oidc_issuer_url = replace(module.eks.outputs.eks_cluster_identity_oidc_issuer, "https://", "") + + kubernetes_namespace = var.kubernetes_namespace + create_namespace = var.create_namespace + + service_account_name = module.this.name + service_account_namespace = var.kubernetes_namespace + + iam_role_enabled = true + + iam_policy_statements = [ + { + sid = "KedaOperatorSQS" + effect = "Allow" + actions = ["SQS:GetQueueAttributes"] + resources = ["*"] + } + ] + + values = compact([ + yamlencode({ + serviceAccount = { + name = module.this.name + } + rbac = { + create = var.rbac_enabled + } + }), + var.resources != null ? 
yamlencode({ resources = var.resources }) : "", + ]) + + context = module.this.context +} diff --git a/modules/eks/keda/outputs.tf b/modules/eks/keda/outputs.tf new file mode 100644 index 000000000..cab379b79 --- /dev/null +++ b/modules/eks/keda/outputs.tf @@ -0,0 +1,48 @@ +## eks_iam_role + +output "service_account_namespace" { + value = module.keda.service_account_namespace + description = "Kubernetes Service Account namespace" +} + +output "service_account_name" { + value = module.keda.service_account_name + description = "Kubernetes Service Account name" +} + +output "service_account_role_name" { + value = module.keda.service_account_role_name + description = "IAM role name" +} + +output "service_account_role_unique_id" { + value = module.keda.service_account_role_unique_id + description = "IAM role unique ID" +} + +output "service_account_role_arn" { + value = module.keda.service_account_role_arn + description = "IAM role ARN" +} + +output "service_account_policy_name" { + value = module.keda.service_account_policy_name + description = "IAM policy name" +} + +output "service_account_policy_id" { + value = module.keda.service_account_policy_id + description = "IAM policy ID" +} + +output "service_account_policy_arn" { + value = module.keda.service_account_policy_arn + description = "IAM policy ARN" +} + +## keda + +output "metadata" { + description = "Block status of the deployed release." + value = module.keda.metadata +} diff --git a/modules/eks/keda/provider-helm.tf b/modules/eks/keda/provider-helm.tf new file mode 100644 index 000000000..91cc7f6d4 --- /dev/null +++ b/modules/eks/keda/provider-helm.tf @@ -0,0 +1,201 @@ +################## +# +# This file is a drop-in to provide a helm provider. +# +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# +# All the following variables are just about configuring the Kubernetes provider +# to be able to modify EKS cluster. The reason there are so many options is +# because at various times, each one of them has had problems, so we give you a choice. +# +# The reason there are so many "enabled" inputs rather than automatically +# detecting whether or not they are enabled based on the value of the input +# is that any logic based on input values requires the values to be known during +# the "plan" phase of Terraform, and often they are not, which causes problems. +# +variable "kubeconfig_file_enabled" { + type = bool + default = false + description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false +} + +variable "kubeconfig_file" { + type = string + default = "" + description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false +} + +variable "kubeconfig_context" { + type = string + default = "" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. 
+ Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false +} + +variable "kube_data_auth_enabled" { + type = bool + default = false + description = <<-EOT + If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. + EOT + nullable = false +} + +variable "kube_exec_auth_enabled" { + type = bool + default = true + description = <<-EOT + If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. + EOT + nullable = false +} + +variable "kube_exec_auth_role_arn" { + type = string + default = "" + description = "The role ARN for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_role_arn_enabled" { + type = bool + default = true + description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false +} + +variable "kube_exec_auth_aws_profile" { + type = string + default = "" + description = "The AWS config profile for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_aws_profile_enabled" { + type = bool + default = false + description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false +} + +variable "kubeconfig_exec_auth_api_version" { + type = string + default = "client.authentication.k8s.io/v1beta1" + description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false +} + +variable "helm_manifest_experiment_enabled" { + type = bool + default = false + description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false +} + +locals { + kubeconfig_file_enabled = var.kubeconfig_file_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + + # Eventually we might try to get this from an environment variable + kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version + + exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? [ + "--profile", var.kube_exec_auth_aws_profile + ] : [] + + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) + exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ + "--role-arn", local.kube_exec_auth_role_arn + ] : [] + + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. 
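+  # try() yields "" when the `eks` remote state has no such output at all, and coalesce()
+  # then maps either that or an empty output to the placeholder "missing", so plan/destroy
+  # can still run after the EKS cluster (or its remote state) is gone.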
+ eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") +} + +data "aws_eks_cluster_auth" "eks" { + count = local.kube_data_auth_enabled ? 1 : 0 + name = local.eks_cluster_id +} + +provider "helm" { + kubernetes { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } + } + experiments { + manifest = var.helm_manifest_experiment_enabled && module.this.enabled + } +} + +provider "kubernetes" { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } +} diff --git a/modules/eks/keda/providers.tf b/modules/eks/keda/providers.tf new file mode 100644 index 000000000..45d458575 --- /dev/null +++ b/modules/eks/keda/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
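+    # compact() drops a null or empty ARN, so this dynamic block renders at most one
+    # `assume_role` block: exactly one when a role ARN is available, none otherwise.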
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = module.iam_roles.terraform_role_arn + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/eks/keda/remote-state.tf b/modules/eks/keda/remote-state.tf new file mode 100644 index 000000000..c1ec8226d --- /dev/null +++ b/modules/eks/keda/remote-state.tf @@ -0,0 +1,8 @@ +module "eks" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.eks_component_name + + context = module.this.context +} diff --git a/modules/eks/keda/variables.tf b/modules/eks/keda/variables.tf new file mode 100644 index 000000000..d461f86d4 --- /dev/null +++ b/modules/eks/keda/variables.tf @@ -0,0 +1,81 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "rbac_enabled" { + type = bool + default = true + description = "Service Account for pods." +} + +variable "eks_component_name" { + type = string + description = "The name of the eks component" + default = "eks/cluster" +} + +variable "resources" { + type = any + description = "A sub-nested map of deployment to resources. e.g. { operator = { requests = { cpu = 100m, memory = 100Mi }, limits = { cpu = 200m, memory = 200Mi } } }" + default = null +} + +variable "kubernetes_namespace" { + type = string + description = "The namespace to install the release into." +} + +variable "description" { + type = string + description = "Set release description attribute (visible in the history)." + default = "Used for autoscaling from external metrics configured as triggers." +} + +variable "chart" { + type = string + description = "Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended." + default = "keda" +} + +variable "chart_version" { + type = string + description = "Specify the exact chart version to install. If this is not specified, the latest version is installed." + default = "2.8" +} + +variable "repository" { + type = string + description = "Repository URL where to locate the requested chart." + default = "https://kedacore.github.io/charts" +} + +variable "create_namespace" { + type = bool + description = "Create the Kubernetes namespace if it does not yet exist" + default = true +} + +variable "wait" { + type = bool + description = "Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`." + default = true +} + +variable "atomic" { + type = bool + description = "If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used." + default = true +} + +variable "cleanup_on_fail" { + type = bool + description = "Allow deletion of new resources created in this upgrade when upgrade fails." + default = true +} + +variable "timeout" { + type = number + description = "Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). 
Defaults to `300` seconds" + default = null +} diff --git a/modules/eks/keda/versions.tf b/modules/eks/keda/versions.tf new file mode 100644 index 000000000..3e6c990e3 --- /dev/null +++ b/modules/eks/keda/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.6.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.9.0, != 2.21.0" + } + } +} diff --git a/modules/eks/loki/README.md b/modules/eks/loki/README.md new file mode 100644 index 000000000..3b96994cf --- /dev/null +++ b/modules/eks/loki/README.md @@ -0,0 +1,150 @@ +--- +tags: + - component/eks/loki + - layer/grafana + - provider/aws + - provider/helm +--- + +# Component: `eks/loki` + +Grafana Loki is a set of resources that can be combined into a fully featured logging stack. Unlike other logging +systems, Loki is built around the idea of only indexing metadata about your logs: labels (just like Prometheus labels). +Log data itself is then compressed and stored in chunks in object stores such as S3 or GCS, or even locally on a +filesystem. + +This component deploys the [grafana/loki](https://github.com/grafana/loki/tree/main/production/helm/loki) helm chart. + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. + +```yaml +components: + terraform: + eks/loki: + vars: + enabled: true + name: loki + alb_controller_ingress_group_component_name: eks/alb-controller-ingress-group/internal +``` + +> [!IMPORTANT] +> +> We recommend using an internal ALB for logging services. You must connect to the private network to access the Loki +> endpoint. + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [helm](#requirement\_helm) | >= 2.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.7.1, != 2.21.0 | +| [random](#requirement\_random) | >= 2.3 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | +| [random](#provider\_random) | >= 2.3 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [alb\_controller\_ingress\_group](#module\_alb\_controller\_ingress\_group) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [basic\_auth\_ssm\_parameters](#module\_basic\_auth\_ssm\_parameters) | cloudposse/ssm-parameter-store/aws | 0.13.0 | +| [dns\_gbl\_delegated](#module\_dns\_gbl\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [loki](#module\_loki) | cloudposse/helm-release/aws | 0.10.1 | +| [loki\_storage](#module\_loki\_storage) | cloudposse/s3-bucket/aws | 4.2.0 | +| [loki\_tls\_label](#module\_loki\_tls\_label) | cloudposse/label/null | 0.25.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [random_pet.basic_auth_username](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/pet) | resource | +| [random_string.basic_auth_password](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | +| 
[aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_schema\_config](#input\_additional\_schema\_config) | A list of additional `configs` for the `schemaConfig` for the Loki chart. This list will be merged with the default schemaConfig.config defined by `var.default_schema_config` |
list(object({
from = string
object_store = string
schema = string
index = object({
prefix = string
period = string
})
}))
| `[]` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [alb\_controller\_ingress\_group\_component\_name](#input\_alb\_controller\_ingress\_group\_component\_name) | The name of the eks/alb-controller-ingress-group component. This should be an internal facing ALB | `string` | `"eks/alb-controller-ingress-group"` | no | +| [atomic](#input\_atomic) | If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used. | `bool` | `true` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [basic\_auth\_enabled](#input\_basic\_auth\_enabled) | If `true`, enabled Basic Auth for the Ingress service. A user and password will be created and stored in AWS SSM. | `bool` | `true` | no | +| [chart](#input\_chart) | Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended. | `string` | `"loki"` | no | +| [chart\_description](#input\_chart\_description) | Set release description attribute (visible in the history). | `string` | `"Loki is a horizontally-scalable, highly-available, multi-tenant log aggregation system inspired by Prometheus."` | no | +| [chart\_repository](#input\_chart\_repository) | Repository URL where to locate the requested chart. | `string` | `"https://grafana.github.io/helm-charts"` | no | +| [chart\_values](#input\_chart\_values) | Additional values to yamlencode as `helm_release` values. | `any` | `{}` | no | +| [chart\_version](#input\_chart\_version) | Specify the exact chart version to install. If this is not specified, the latest version is installed. | `string` | `null` | no | +| [cleanup\_on\_fail](#input\_cleanup\_on\_fail) | Allow deletion of new resources created in this upgrade when upgrade fails. | `bool` | `true` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [create\_namespace](#input\_create\_namespace) | Create the Kubernetes namespace if it does not yet exist | `bool` | `true` | no | +| [default\_schema\_config](#input\_default\_schema\_config) | A list of default `configs` for the `schemaConfig` for the Loki chart. For new installations, the default schema config doesn't change. See https://grafana.com/docs/loki/latest/operations/storage/schema/#new-loki-installs |
list(object({
from = string
object_store = string
schema = string
index = object({
prefix = string
period = string
})
}))
|
[
{
"from": "2024-04-01",
"index": {
"period": "24h",
"prefix": "index_"
},
"object_store": "s3",
"schema": "v13",
"store": "tsdb"
}
]
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can be seen in the plan | `bool` | `false` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).<br>
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.<br>
Does not affect `id_full`. | `number` | `null` | no | +| [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | +| [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | +| [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | +| [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | +| [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | +| [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | +| [kubernetes\_namespace](#input\_kubernetes\_namespace) | Kubernetes namespace to install the release into | `string` | `"monitoring"` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [ssm\_path\_template](#input\_ssm\_path\_template) | A string template to be used to create paths in AWS SSM to store basic auth credentials for this service | `string` | `"/%s/basic-auth/%s"` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [timeout](#input\_timeout) | Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds | `number` | `300` | no | +| [verify](#input\_verify) | Verify the package before installing it. Helm uses a provenance file to verify the integrity of the chart; this must be hosted alongside the chart | `bool` | `false` | no | +| [wait](#input\_wait) | Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`. | `bool` | `true` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [basic\_auth\_username](#output\_basic\_auth\_username) | If enabled, the username for basic auth | +| [id](#output\_id) | The ID of this deployment | +| [metadata](#output\_metadata) | Block status of the deployed release | +| [ssm\_path\_basic\_auth\_password](#output\_ssm\_path\_basic\_auth\_password) | If enabled, the path in AWS SSM to find the password for basic auth | +| [url](#output\_url) | The hostname used for this Loki deployment | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/eks/loki) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/eks/loki/context.tf b/modules/eks/loki/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/eks/loki/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/eks/loki/main.tf b/modules/eks/loki/main.tf new file mode 100644 index 000000000..e7d78f2d9 --- /dev/null +++ b/modules/eks/loki/main.tf @@ -0,0 +1,190 @@ +locals { + enabled = module.this.enabled + + name = length(module.this.name) > 0 ? module.this.name : "loki" + ingress_host_name = format("%s.%s.%s", local.name, module.this.environment, module.dns_gbl_delegated.outputs.default_domain_name) + ingress_group_name = module.alb_controller_ingress_group.outputs.group_name + + ssm_path_password = format(var.ssm_path_template, module.this.id, "password") +} + +resource "random_pet" "basic_auth_username" { + count = local.enabled && var.basic_auth_enabled ? 1 : 0 +} + +resource "random_string" "basic_auth_password" { + count = local.enabled && var.basic_auth_enabled ? 
1 : 0 + + length = 12 + special = true +} + +module "basic_auth_ssm_parameters" { + source = "cloudposse/ssm-parameter-store/aws" + version = "0.13.0" + + enabled = local.enabled && var.basic_auth_enabled + + parameter_write = [ + { + name = format(var.ssm_path_template, module.this.id, "username") + value = random_pet.basic_auth_username[0].id + description = "Basic Auth Username for ${module.this.id}" + type = "SecureString" + overwrite = true + }, + { + name = local.ssm_path_password + value = random_string.basic_auth_password[0].result + description = "Basic Auth Password for ${module.this.id}" + type = "SecureString" + overwrite = true + } + ] + + context = module.this.context +} + +module "loki_storage" { + source = "cloudposse/s3-bucket/aws" + version = "4.2.0" + + for_each = toset(["chunks", "ruler", "admin"]) + + name = local.name + attributes = [each.key] + + enabled = local.enabled + + context = module.this.context +} + +module "loki_tls_label" { + source = "cloudposse/label/null" + version = "0.25.0" + + enabled = local.enabled + + attributes = ["tls"] + + context = module.this.context +} + +module "loki" { + source = "cloudposse/helm-release/aws" + version = "0.10.1" + + enabled = local.enabled + + name = local.name + chart = var.chart + description = var.chart_description + repository = var.chart_repository + chart_version = var.chart_version + + kubernetes_namespace = var.kubernetes_namespace + create_namespace = var.create_namespace + + verify = var.verify + wait = var.wait + atomic = var.atomic + cleanup_on_fail = var.cleanup_on_fail + timeout = var.timeout + + eks_cluster_oidc_issuer_url = replace(module.eks.outputs.eks_cluster_identity_oidc_issuer, "https://", "") + + iam_role_enabled = true + iam_policy = [{ + statements = [ + { + sid = "AllowLokiStorageAccess" + effect = "Allow" + resources = [ + module.loki_storage["chunks"].bucket_arn, + module.loki_storage["ruler"].bucket_arn, + module.loki_storage["admin"].bucket_arn, + format("%s/*", module.loki_storage["chunks"].bucket_arn), + format("%s/*", module.loki_storage["ruler"].bucket_arn), + format("%s/*", module.loki_storage["admin"].bucket_arn), + ] + actions = [ + "s3:ListBucket", + "s3:PutObject", + "s3:GetObject", + "s3:DeleteObject" + ] + }, + ] + }] + + values = compact([ + yamlencode({ + loki = { + # For new installations, schema config doesnt change. 
See the following: + # https://grafana.com/docs/loki/latest/operations/storage/schema/#new-loki-installs + schemaConfig = { + configs = concat(var.default_schema_config, var.additional_schema_config) + } + storage = { + bucketNames = { + chunks = module.loki_storage["chunks"].bucket_id + ruler = module.loki_storage["ruler"].bucket_id + admin = module.loki_storage["admin"].bucket_id + }, + type = "s3", + s3 = { + region = var.region + } + } + } + # Do not use the default nginx gateway + gateway = { + enabled = false + } + # Instead, we want to use AWS ALB Ingress Controller + ingress = { + enabled = true + annotations = { + "kubernetes.io/ingress.class" = "alb" + "external-dns.alpha.kubernetes.io/hostname" = local.ingress_host_name + "alb.ingress.kubernetes.io/group.name" = local.ingress_group_name + # We dont need to supply "alb.ingress.kubernetes.io/certificate-arn" because of AWS ALB controller's auto discovery using the given host + "alb.ingress.kubernetes.io/backend-protocol" = "HTTP" + "alb.ingress.kubernetes.io/listen-ports" = "[{\"HTTP\": 80},{\"HTTPS\":443}]" + "alb.ingress.kubernetes.io/ssl-redirect" = "443" + "alb.ingress.kubernetes.io/scheme" = "internal" + "alb.ingress.kubernetes.io/target-type" = "ip" + } + hosts = [ + local.ingress_host_name + ] + tls = [ + { + secretName = module.loki_tls_label.id + hosts = [local.ingress_host_name] + } + ] + } + # Loki Canary does not work when gateway is disabled + # https://github.com/grafana/loki/issues/11208 + test = { + enabled = false + } + lokiCanary = { + enabled = false + } + }), + yamlencode( + var.basic_auth_enabled ? { + basicAuth = { + enabled = true + username = random_pet.basic_auth_username[0].id + password = random_string.basic_auth_password[0].result + } + } : {} + ), + yamlencode(var.chart_values), + ]) + + context = module.this.context +} diff --git a/modules/eks/loki/outputs.tf b/modules/eks/loki/outputs.tf new file mode 100644 index 000000000..8fe9b3aea --- /dev/null +++ b/modules/eks/loki/outputs.tf @@ -0,0 +1,24 @@ +output "metadata" { + value = module.loki.metadata + description = "Block status of the deployed release" +} + +output "id" { + value = module.this.id + description = "The ID of this deployment" +} + +output "url" { + value = local.ingress_host_name + description = "The hostname used for this Loki deployment" +} + +output "basic_auth_username" { + value = random_pet.basic_auth_username[0].id + description = "If enabled, the username for basic auth" +} + +output "ssm_path_basic_auth_password" { + value = local.ssm_path_password + description = "If enabled, the path in AWS SSM to find the password for basic auth" +} diff --git a/modules/eks/loki/provider-helm.tf b/modules/eks/loki/provider-helm.tf new file mode 100644 index 000000000..64459d4f4 --- /dev/null +++ b/modules/eks/loki/provider-helm.tf @@ -0,0 +1,166 @@ +################## +# +# This file is a drop-in to provide a helm provider. +# +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# +# All the following variables are just about configuring the Kubernetes provider +# to be able to modify EKS cluster. The reason there are so many options is +# because at various times, each one of them has had problems, so we give you a choice. 
+# +# The reason there are so many "enabled" inputs rather than automatically +# detecting whether or not they are enabled based on the value of the input +# is that any logic based on input values requires the values to be known during +# the "plan" phase of Terraform, and often they are not, which causes problems. +# +variable "kubeconfig_file_enabled" { + type = bool + default = false + description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" +} + +variable "kubeconfig_file" { + type = string + default = "" + description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" +} + +variable "kubeconfig_context" { + type = string + default = "" + description = "Context to choose from the Kubernetes kube config file" +} + +variable "kube_data_auth_enabled" { + type = bool + default = false + description = <<-EOT + If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. + EOT +} + +variable "kube_exec_auth_enabled" { + type = bool + default = true + description = <<-EOT + If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. + EOT +} + +variable "kube_exec_auth_role_arn" { + type = string + default = "" + description = "The role ARN for `aws eks get-token` to use" +} + +variable "kube_exec_auth_role_arn_enabled" { + type = bool + default = true + description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" +} + +variable "kube_exec_auth_aws_profile" { + type = string + default = "" + description = "The AWS config profile for `aws eks get-token` to use" +} + +variable "kube_exec_auth_aws_profile_enabled" { + type = bool + default = false + description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" +} + +variable "kubeconfig_exec_auth_api_version" { + type = string + default = "client.authentication.k8s.io/v1beta1" + description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" +} + +variable "helm_manifest_experiment_enabled" { + type = bool + default = false + description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" +} + +locals { + kubeconfig_file_enabled = var.kubeconfig_file_enabled + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + + # Eventually we might try to get this from an environment variable + kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version + + exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? [ + "--profile", var.kube_exec_auth_aws_profile + ] : [] + + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) + exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ + "--role-arn", local.kube_exec_auth_role_arn + ] : [] + + # Provide dummy configuration for the case where the EKS cluster is not available. 
+ certificate_authority_data = try(module.eks.outputs.eks_cluster_certificate_authority_data, "") + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = try(module.eks.outputs.eks_cluster_endpoint, "") +} + +data "aws_eks_cluster_auth" "eks" { + count = local.kube_data_auth_enabled ? 1 : 0 + name = local.eks_cluster_id +} + +provider "helm" { + kubernetes { + host = local.eks_cluster_endpoint + cluster_ca_certificate = base64decode(local.certificate_authority_data) + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster + # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. + config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + config_context = var.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } + } + experiments { + manifest = var.helm_manifest_experiment_enabled && module.this.enabled + } +} + +provider "kubernetes" { + host = local.eks_cluster_endpoint + cluster_ca_certificate = base64decode(local.certificate_authority_data) + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster + # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. + config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + config_context = var.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } +} diff --git a/modules/eks/loki/providers.tf b/modules/eks/loki/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/eks/loki/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/eks/loki/remote-state.tf b/modules/eks/loki/remote-state.tf new file mode 100644 index 000000000..0ff7ae72f --- /dev/null +++ b/modules/eks/loki/remote-state.tf @@ -0,0 +1,39 @@ +variable "eks_component_name" { + type = string + description = "The name of the eks component" + default = "eks/cluster" +} + +variable "alb_controller_ingress_group_component_name" { + type = string + description = "The name of the eks/alb-controller-ingress-group component. This should be an internal facing ALB" + default = "eks/alb-controller-ingress-group" +} + +module "eks" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.eks_component_name + + context = module.this.context +} + +module "alb_controller_ingress_group" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.alb_controller_ingress_group_component_name + + context = module.this.context +} + +module "dns_gbl_delegated" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + environment = "gbl" + component = "dns-delegated" + + context = module.this.context +} diff --git a/modules/eks/loki/variables.tf b/modules/eks/loki/variables.tf new file mode 100644 index 000000000..c51d15817 --- /dev/null +++ b/modules/eks/loki/variables.tf @@ -0,0 +1,127 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "basic_auth_enabled" { + type = bool + description = "If `true`, enabled Basic Auth for the Ingress service. A user and password will be created and stored in AWS SSM." + default = true +} + +variable "ssm_path_template" { + type = string + description = "A string template to be used to create paths in AWS SSM to store basic auth credentials for this service" + default = "/%s/basic-auth/%s" +} + +variable "chart_description" { + type = string + description = "Set release description attribute (visible in the history)." + default = "Loki is a horizontally-scalable, highly-available, multi-tenant log aggregation system inspired by Prometheus." +} + +variable "chart" { + type = string + description = "Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended." + default = "loki" +} + +variable "chart_repository" { + type = string + description = "Repository URL where to locate the requested chart." + default = "https://grafana.github.io/helm-charts" +} + +variable "chart_version" { + type = string + description = "Specify the exact chart version to install. If this is not specified, the latest version is installed." + default = null +} + +variable "kubernetes_namespace" { + type = string + description = "Kubernetes namespace to install the release into" + default = "monitoring" +} + +variable "create_namespace" { + type = bool + description = "Create the Kubernetes namespace if it does not yet exist" + default = true +} + +variable "verify" { + type = bool + description = "Verify the package before installing it. 
Helm uses a provenance file to verify the integrity of the chart; this must be hosted alongside the chart" + default = false +} + +variable "wait" { + type = bool + description = "Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`." + default = true +} + +variable "atomic" { + type = bool + description = "If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used." + default = true +} + +variable "cleanup_on_fail" { + type = bool + description = "Allow deletion of new resources created in this upgrade when upgrade fails." + default = true +} + +variable "timeout" { + type = number + description = "Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds" + default = 300 +} + +variable "chart_values" { + type = any + description = "Additional values to yamlencode as `helm_release` values." + default = {} +} + +variable "default_schema_config" { + type = list(object({ + from = string + object_store = string + schema = string + index = object({ + prefix = string + period = string + }) + })) + description = "A list of default `configs` for the `schemaConfig` for the Loki chart. For new installations, the default schema config doesn't change. See https://grafana.com/docs/loki/latest/operations/storage/schema/#new-loki-installs" + default = [ + { + from = "2024-04-01" # for a new install, this must be a date in the past, use a recent date. Format is YYYY-MM-DD. + object_store = "s3" + store = "tsdb" + schema = "v13" + index = { + prefix = "index_" + period = "24h" + } + } + ] +} + +variable "additional_schema_config" { + type = list(object({ + from = string + object_store = string + schema = string + index = object({ + prefix = string + period = string + }) + })) + description = "A list of additional `configs` for the `schemaConfig` for the Loki chart. This list will be merged with the default schemaConfig.config defined by `var.default_schema_config`" + default = [] +} diff --git a/modules/eks/loki/versions.tf b/modules/eks/loki/versions.tf new file mode 100644 index 000000000..8b4106a3b --- /dev/null +++ b/modules/eks/loki/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.7.1, != 2.21.0" + } + random = { + source = "hashicorp/random" + version = ">= 2.3" + } + } +} diff --git a/modules/eks/metrics-server/README.md b/modules/eks/metrics-server/README.md index 3ecaf303d..743edc51e 100644 --- a/modules/eks/metrics-server/README.md +++ b/modules/eks/metrics-server/README.md @@ -1,6 +1,15 @@ -# Component: `metrics-server` +--- +tags: + - component/eks/metrics-server + - layer/eks + - provider/aws + - provider/helm +--- -This component creates a Helm release for [metrics-server](https://github.com/kubernetes-sigs/metrics-server) is a Kubernetes addon that provides resource usage metrics used in particular by other addons such Horizontal Pod Autoscaler. +# Component: `eks/metrics-server` + +This component creates a Helm release for [metrics-server](https://github.com/kubernetes-sigs/metrics-server) is a +Kubernetes addon that provides resource usage metrics used in particular by other addons such Horizontal Pod Autoscaler. 
## Usage @@ -37,36 +46,36 @@ components: chart_values: {} ``` + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | | [helm](#requirement\_helm) | >= 2.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.14.0, != 2.21.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | -| [kubernetes](#provider\_kubernetes) | n/a | +| [aws](#provider\_aws) | >= 4.9.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | -| [metrics\_server](#module\_metrics\_server) | cloudposse/helm-release/aws | 0.5.0 | +| [metrics\_server](#module\_metrics\_server) | cloudposse/helm-release/aws | 0.10.1 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources | Name | Type | |------|------| -| [kubernetes_namespace.default](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | | [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | ## Inputs @@ -76,34 +85,33 @@ components: | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | | [atomic](#input\_atomic) | If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used. | `bool` | `true` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | -| [chart](#input\_chart) | Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended. | `string` | n/a | yes | +| [chart](#input\_chart) | Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended. | `string` | `"metrics-server"` | no | | [chart\_description](#input\_chart\_description) | Set release description attribute (visible in the history). | `string` | `null` | no | -| [chart\_repository](#input\_chart\_repository) | Repository URL where to locate the requested chart. | `string` | n/a | yes | +| [chart\_repository](#input\_chart\_repository) | Repository URL where to locate the requested chart. | `string` | `"https://charts.bitnami.com/bitnami"` | no | | [chart\_values](#input\_chart\_values) | Additional values to yamlencode as `helm_release` values. | `any` | `{}` | no | -| [chart\_version](#input\_chart\_version) | Specify the exact chart version to install. If this is not specified, the latest version is installed. | `string` | `null` | no | +| [chart\_version](#input\_chart\_version) | Specify the exact chart version to install. If this is not specified, the latest version is installed. | `string` | `"6.2.6"` | no | | [cleanup\_on\_fail](#input\_cleanup\_on\_fail) | Allow deletion of new resources created in this upgrade when upgrade fails. | `bool` | `true` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | -| [create\_namespace](#input\_create\_namespace) | Create the namespace if it does not yet exist. Defaults to `false`. | `bool` | `null` | no | +| [create\_namespace](#input\_create\_namespace) | Create the namespace if it does not yet exist. Defaults to `true`. | `bool` | `true` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to the `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `true` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | | [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | | [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | | [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | -| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | | [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | | [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | | [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | -| [kubernetes\_namespace](#input\_kubernetes\_namespace) | The namespace to install the release into. | `string` | n/a | yes | +| [kubernetes\_namespace](#input\_kubernetes\_namespace) | The namespace to install the release into. | `string` | `"metrics-server"` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -113,7 +121,7 @@ components: | [rbac\_enabled](#input\_rbac\_enabled) | Service Account for pods. | `bool` | `true` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region. | `string` | n/a | yes | -| [resources](#input\_resources) | The cpu and memory of the deployment's limits and requests. |
object({
limits = object({
cpu = string
memory = string
})
requests = object({
cpu = string
memory = string
})
})
| n/a | yes | +| [resources](#input\_resources) | The cpu and memory of the deployment's limits and requests. |
object({
limits = object({
cpu = string
memory = string
})
requests = object({
cpu = string
memory = string
})
})
|
{
"limits": {
"cpu": "100m",
"memory": "300Mi"
},
"requests": {
"cpu": "20m",
"memory": "60Mi"
}
}
| no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | @@ -126,6 +134,7 @@ components: |------|-------------| | [metadata](#output\_metadata) | Block status of the deployed release | + ## References diff --git a/modules/eks/metrics-server/default.auto.tfvars b/modules/eks/metrics-server/default.auto.tfvars deleted file mode 100644 index 2e2708d6d..000000000 --- a/modules/eks/metrics-server/default.auto.tfvars +++ /dev/null @@ -1,21 +0,0 @@ -enabled = false - -name = "metrics-server" - -chart = "metrics-server" -chart_repository = "https://charts.bitnami.com/bitnami" -chart_version = "5.11.4" - -create_namespace = true -kubernetes_namespace = "metrics-server" - -resources = { - limits = { - cpu = "100m" - memory = "300Mi" - }, - requests = { - cpu = "20m" - memory = "60Mi" - } -} diff --git a/modules/eks/metrics-server/main.tf b/modules/eks/metrics-server/main.tf index 620599d70..49c0e0279 100644 --- a/modules/eks/metrics-server/main.tf +++ b/modules/eks/metrics-server/main.tf @@ -2,31 +2,27 @@ locals { enabled = module.this.enabled } -resource "kubernetes_namespace" "default" { - count = local.enabled && var.create_namespace ? 1 : 0 - - metadata { - name = var.kubernetes_namespace - - labels = module.this.tags - } +moved { + from = kubernetes_namespace.default + to = module.metrics_server.kubernetes_namespace.default } module "metrics_server" { source = "cloudposse/helm-release/aws" - version = "0.5.0" + version = "0.10.1" + + name = "" # avoids hitting length restrictions on IAM Role names + chart = var.chart + repository = var.chart_repository + description = var.chart_description + chart_version = var.chart_version + wait = var.wait + atomic = var.atomic + cleanup_on_fail = var.cleanup_on_fail + timeout = var.timeout - name = "" # avoids hitting length restrictions on IAM Role names - chart = var.chart - repository = var.chart_repository - description = var.chart_description - chart_version = var.chart_version - kubernetes_namespace = join("", kubernetes_namespace.default.*.id) - create_namespace = false - wait = var.wait - atomic = var.atomic - cleanup_on_fail = var.cleanup_on_fail - timeout = var.timeout + kubernetes_namespace = var.kubernetes_namespace + create_namespace_with_kubernetes = var.create_namespace eks_cluster_oidc_issuer_url = replace(module.eks.outputs.eks_cluster_identity_oidc_issuer, "https://", "") @@ -47,7 +43,9 @@ module "metrics_server" { # metrics-server-specific values yamlencode({ podLabels = merge({ - chart = var.chart + chart = var.chart + # TODO: These should be configurable + # Chart should default to https://kubernetes-sigs.github.io/metrics-server/ repo = "bitnami" component = "hpa" namespace = var.kubernetes_namespace diff --git a/modules/eks/metrics-server/provider-helm.tf b/modules/eks/metrics-server/provider-helm.tf index 20e4d3837..91cc7f6d4 100644 --- a/modules/eks/metrics-server/provider-helm.tf +++ b/modules/eks/metrics-server/provider-helm.tf @@ -2,6 +2,12 @@ # # This file is a drop-in to provide a helm provider. # +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. 
module.eks to provide the EKS cluster information +# # All the following variables are just about configuring the Kubernetes provider # to be able to modify EKS cluster. The reason there are so many options is # because at various times, each one of them has had problems, so we give you a choice. @@ -15,18 +21,35 @@ variable "kubeconfig_file_enabled" { type = bool default = false description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false } variable "kubeconfig_file" { type = string default = "" description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false } variable "kubeconfig_context" { type = string default = "" - description = "Context to choose from the Kubernetes kube config file" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false } variable "kube_data_auth_enabled" { @@ -36,6 +59,7 @@ variable "kube_data_auth_enabled" { If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_enabled" { @@ -45,48 +69,62 @@ variable "kube_exec_auth_enabled" { If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_role_arn" { type = string default = "" description = "The role ARN for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_role_arn_enabled" { type = bool default = true description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false } variable "kube_exec_auth_aws_profile" { type = string default = "" description = "The AWS config profile for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_aws_profile_enabled" { type = bool default = false description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false } variable "kubeconfig_exec_auth_api_version" { type = string default = "client.authentication.k8s.io/v1beta1" description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false } variable "helm_manifest_experiment_enabled" { type = bool - default = true + default = false description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false } locals { kubeconfig_file_enabled = var.kubeconfig_file_enabled - kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled - kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? 
"" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled # Eventually we might try to get this from an environment variable kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version @@ -95,14 +133,17 @@ locals { "--profile", var.kube_exec_auth_aws_profile ] : [] - kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, var.import_role_arn, module.iam_roles.terraform_role_arn) + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ "--role-arn", local.kube_exec_auth_role_arn ] : [] - certificate_authority_data = module.eks.outputs.eks_cluster_certificate_authority_data - eks_cluster_id = module.eks.outputs.eks_cluster_id - eks_cluster_endpoint = module.eks.outputs.eks_cluster_endpoint + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") } data "aws_eks_cluster_auth" "eks" { @@ -113,15 +154,16 @@ data "aws_eks_cluster_auth" "eks" { provider "helm" { kubernetes { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) - token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? 
["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" @@ -132,21 +174,22 @@ provider "helm" { } } experiments { - manifest = var.helm_manifest_experiment_enabled + manifest = var.helm_manifest_experiment_enabled && module.this.enabled } } provider "kubernetes" { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) - token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" diff --git a/modules/eks/metrics-server/providers.tf b/modules/eks/metrics-server/providers.tf index 74ff8e62c..89ed50a98 100644 --- a/modules/eks/metrics-server/providers.tf +++ b/modules/eks/metrics-server/providers.tf @@ -1,11 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -14,15 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/eks/metrics-server/remote-state.tf b/modules/eks/metrics-server/remote-state.tf index 6ef90fd26..c1ec8226d 100644 --- a/modules/eks/metrics-server/remote-state.tf +++ b/modules/eks/metrics-server/remote-state.tf @@ -1,6 +1,6 @@ module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = var.eks_component_name diff --git a/modules/eks/metrics-server/variables.tf b/modules/eks/metrics-server/variables.tf index 29173515e..eb563b1ee 100644 --- a/modules/eks/metrics-server/variables.tf +++ b/modules/eks/metrics-server/variables.tf @@ -12,17 +12,20 @@ variable "chart_description" { variable "chart" { type = string description = "Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended." + default = "metrics-server" } variable "chart_repository" { type = string description = "Repository URL where to locate the requested chart." + # TODO: Chart should default to https://kubernetes-sigs.github.io/metrics-server/ + default = "https://charts.bitnami.com/bitnami" } variable "chart_version" { type = string description = "Specify the exact chart version to install. If this is not specified, the latest version is installed." - default = null + default = "6.2.6" } variable "resources" { @@ -37,17 +40,28 @@ variable "resources" { }) }) description = "The cpu and memory of the deployment's limits and requests." + default = { + limits = { + cpu = "100m" + memory = "300Mi" + } + requests = { + cpu = "20m" + memory = "60Mi" + } + } } variable "create_namespace" { type = bool - description = "Create the namespace if it does not yet exist. Defaults to `false`." - default = null + description = "Create the namespace if it does not yet exist. Defaults to `true`." + default = true } variable "kubernetes_namespace" { type = string description = "The namespace to install the release into." 
+ default = "metrics-server" } variable "timeout" { diff --git a/modules/eks/metrics-server/versions.tf b/modules/eks/metrics-server/versions.tf index 58318d20e..9f0f54df7 100644 --- a/modules/eks/metrics-server/versions.tf +++ b/modules/eks/metrics-server/versions.tf @@ -1,14 +1,18 @@ terraform { - required_version = ">= 1.0.0" + required_version = ">= 1.3.0" required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.9.0" } helm = { source = "hashicorp/helm" version = ">= 2.0" } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.14.0, != 2.21.0" + } } } diff --git a/modules/eks/prometheus-scraper/README.md b/modules/eks/prometheus-scraper/README.md new file mode 100644 index 000000000..fc6754aa8 --- /dev/null +++ b/modules/eks/prometheus-scraper/README.md @@ -0,0 +1,166 @@ +--- +tags: + - component/eks/prometheus-scraper + - layer/grafana + - provider/aws + - provider/helm +--- + +# Component: `eks/prometheus-scraper` + +This component provisions the an Amazon Managed collector or scraper to connect Amazon Managed Prometheus (AMP) with an +EKS cluster. + +A common use case for Amazon Managed Service for Prometheus is to monitor Kubernetes clusters managed by Amazon Elastic +Kubernetes Service (Amazon EKS). Kubernetes clusters, and many applications that run within Amazon EKS, automatically +export their metrics for Prometheus-compatible scrapers to access. + +Amazon Managed Service for Prometheus provides a fully managed, agentless scraper, or collector, that automatically +discovers and pulls Prometheus-compatible metrics. You don't have to manage, install, patch, or maintain agents or +scrapers. An Amazon Managed Service for Prometheus collector provides reliable, stable, highly available, automatically +scaled collection of metrics for your Amazon EKS cluster. Amazon Managed Service for Prometheus managed collectors work +with Amazon EKS clusters, including EC2 and Fargate. + +An Amazon Managed Service for Prometheus collector creates an Elastic Network Interface (ENI) per subnet specified when +creating the scraper. The collector scrapes the metrics through these ENIs, and uses remote_write to push the data to +your Amazon Managed Service for Prometheus workspace using a VPC endpoint. The scraped data never travels on the public +internet. + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. + +```yaml +components: + terraform: + eks/prometheus-scraper: + vars: + enabled: true + name: prometheus-scraper + # This refers to the `managed-prometheus/workspace` Terraform component, + # but the component name can be whatever you choose to name the stack component + prometheus_component_name: prometheus +``` + +### Authenticating with EKS + +In order for this managed collector to authenticate with the EKS cluster, update auth map after deploying. + +Note the `scraper_role_arn` and `clusterrole_username` outputs and set them to `rolearn` and `username` respectively +with the `map_additional_iam_roles` input for `eks/cluster`. + +```yaml +components: + terraform: + eks/cluster: + vars: + map_additional_iam_roles: + # this role is used to grant the Prometheus scraper access to this cluster. See eks/prometheus-scraper + - rolearn: "arn:aws:iam::111111111111:role/AWSServiceRoleForAmazonPrometheusScraper_111111111111111" + username: "acme-plat-ue2-sandbox-prometheus-scraper" + groups: [] +``` + +Then reapply the given cluster component. 
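+
+To see the values to copy, read the component outputs after the first apply. The command below is a minimal sketch,
+assuming Atmos is used to manage the stacks; the stack name `plat-ue2-sandbox` is a placeholder.
+
+```bash
+# Print the scraper outputs; copy `scraper_role_arn` and `clusterrole_username`
+# into `map_additional_iam_roles` for the `eks/cluster` component.
+atmos terraform output eks/prometheus-scraper -s plat-ue2-sandbox
+
+# Illustrative output (values will differ per account and stack):
+# clusterrole_username = "acme-plat-ue2-sandbox-prometheus-scraper"
+# scraper_role_arn     = "arn:aws:iam::111111111111:role/AWSServiceRoleForAmazonPrometheusScraper_111111111111111"
+```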
+ + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [helm](#requirement\_helm) | >= 2.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.7.1, != 2.21.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [prometheus](#module\_prometheus) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [scraper\_access](#module\_scraper\_access) | cloudposse/helm-release/aws | 0.10.1 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_prometheus_scraper.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/prometheus_scraper) | resource | +| [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [atomic](#input\_atomic) | If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used. | `bool` | `true` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [chart\_description](#input\_chart\_description) | Set release description attribute (visible in the history). | `string` | `"AWS Managed Prometheus (AMP) scrapper roles and role bindings"` | no | +| [chart\_values](#input\_chart\_values) | Additional values to yamlencode as `helm_release` values. | `any` | `{}` | no | +| [cleanup\_on\_fail](#input\_cleanup\_on\_fail) | Allow deletion of new resources created in this upgrade when upgrade fails. | `bool` | `true` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [create\_namespace](#input\_create\_namespace) | Create the Kubernetes namespace if it does not yet exist | `bool` | `true` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to the `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | +| [eks\_scrape\_configuration](#input\_eks\_scrape\_configuration) | Scrape configuration for the agentless scraper that will installed with EKS integrations | `string` | `"global:\n scrape_interval: 30s\nscrape_configs:\n # pod metrics\n - job_name: pod_exporter\n kubernetes_sd_configs:\n - role: pod\n # container metrics\n - job_name: cadvisor\n scheme: https\n authorization:\n credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n kubernetes_sd_configs:\n - role: node\n relabel_configs:\n - action: labelmap\n regex: __meta_kubernetes_node_label_(.+)\n - replacement: kubernetes.default.svc:443\n target_label: __address__\n - source_labels: [__meta_kubernetes_node_name]\n regex: (.+)\n target_label: __metrics_path__\n replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor\n # apiserver metrics\n - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n job_name: kubernetes-apiservers\n kubernetes_sd_configs:\n - role: endpoints\n relabel_configs:\n - action: keep\n regex: default;kubernetes;https\n source_labels:\n - __meta_kubernetes_namespace\n - __meta_kubernetes_service_name\n - __meta_kubernetes_endpoint_port_name\n scheme: https\n # kube proxy metrics\n - job_name: kube-proxy\n honor_labels: true\n kubernetes_sd_configs:\n - role: pod\n relabel_configs:\n - action: keep\n source_labels:\n - __meta_kubernetes_namespace\n - __meta_kubernetes_pod_name\n separator: '/'\n regex: 'kube-system/kube-proxy.+'\n - source_labels:\n - __address__\n action: replace\n target_label: __address__\n regex: (.+?)(\\\\:\\\\d+)?\n replacement: $1:10249\n"` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | +| [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | +| [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | +| [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | +| [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | +| [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | +| [kubernetes\_namespace](#input\_kubernetes\_namespace) | Kubernetes namespace to install the release into | `string` | `"kube-system"` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [prometheus\_component\_name](#input\_prometheus\_component\_name) | The name of the Amazon Managed Prometheus workspace component | `string` | `"managed-prometheus/workspace"` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [timeout](#input\_timeout) | Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds | `number` | `300` | no | +| [verify](#input\_verify) | Verify the package before installing it. Helm uses a provenance file to verify the integrity of the chart; this must be hosted alongside the chart | `bool` | `false` | no | +| [vpc\_component\_name](#input\_vpc\_component\_name) | The name of the vpc component | `string` | `"vpc"` | no | +| [wait](#input\_wait) | Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`. | `bool` | `true` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [clusterrole\_username](#output\_clusterrole\_username) | The username of the ClusterRole used to give the scraper in-cluster permissions | +| [scraper\_role\_arn](#output\_scraper\_role\_arn) | The Amazon Resource Name (ARN) of the IAM role that provides permissions for the scraper to discover, collect, and produce metrics | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/eks/prometheus-scraper) - + Cloud Posse's upstream component +- [AMP Collector Documentation](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-collector-how-to.html#AMP-collector-eks-setup) + +[](https://cpco.io/component) diff --git a/modules/eks/prometheus-scraper/charts/scraper-access/Chart.yaml b/modules/eks/prometheus-scraper/charts/scraper-access/Chart.yaml new file mode 100644 index 000000000..7dcaf9a3e --- /dev/null +++ b/modules/eks/prometheus-scraper/charts/scraper-access/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: scraper-access +description: A Helm chart for identity provider roles + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "0.1.0" diff --git a/modules/eks/prometheus-scraper/charts/scraper-access/templates/clusterrole-binding.yml b/modules/eks/prometheus-scraper/charts/scraper-access/templates/clusterrole-binding.yml new file mode 100644 index 000000000..e2a8feced --- /dev/null +++ b/modules/eks/prometheus-scraper/charts/scraper-access/templates/clusterrole-binding.yml @@ -0,0 +1,26 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Values.cluster_role_name }} +rules: + - apiGroups: [""] + resources: ["nodes", "nodes/proxy", "nodes/metrics", "services", "endpoints", "pods", "ingresses", "configmaps"] + verbs: ["describe", "get", "list", "watch"] + - apiGroups: ["extensions", "networking.k8s.io"] + resources: ["ingresses/status", "ingresses"] + verbs: ["describe", "get", "list", "watch"] + - nonResourceURLs: ["/metrics"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Values.cluster_role_name }}-binding +subjects: +- kind: User + name: {{ .Values.cluster_user_name }} + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: {{ .Values.cluster_role_name }} + apiGroup: rbac.authorization.k8s.io diff --git a/modules/eks/prometheus-scraper/charts/scraper-access/values.yaml b/modules/eks/prometheus-scraper/charts/scraper-access/values.yaml new file mode 100644 index 000000000..009dde0b8 --- /dev/null +++ b/modules/eks/prometheus-scraper/charts/scraper-access/values.yaml @@ -0,0 +1,8 @@ +# Default values for scraper-access. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# These default values can be overridden per environment in conf/.yaml files + +cluster_role_name: "aps-collector-role" +cluster_user_name: "aps-collector-user" diff --git a/modules/eks/prometheus-scraper/context.tf b/modules/eks/prometheus-scraper/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/eks/prometheus-scraper/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/eks/prometheus-scraper/main.tf b/modules/eks/prometheus-scraper/main.tf new file mode 100644 index 000000000..01b3e6d0b --- /dev/null +++ b/modules/eks/prometheus-scraper/main.tf @@ -0,0 +1,68 @@ +locals { + enabled = module.this.enabled + + # This will be used as the name of the ClusterRole and binded User + aps_clusterrole_identity = module.this.id + + # Amazon EKS requires a different format for this ARN. You must adjust the format of the returned ARN + # arn:aws:iam::account-id:role/AWSServiceRoleForAmazonPrometheusScraper_unique-id + # + # For example, + # arn:aws:iam::111122223333:role/aws-service-role/scraper.aps.amazonaws.com/AWSServiceRoleForAmazonPrometheusScraper_1234abcd-56ef-7 + # must be changed be to + # arn:aws:iam::111122223333:role/AWSServiceRoleForAmazonPrometheusScraper_1234abcd-56ef-7 + aps_clusterrole_username = replace(aws_prometheus_scraper.this[0].role_arn, "role/aws-service-role/scraper.aps.amazonaws.com", "role") + +} + +resource "aws_prometheus_scraper" "this" { + count = local.enabled ? 1 : 0 + + source { + eks { + cluster_arn = module.eks.outputs.eks_cluster_arn + security_group_ids = [module.eks.outputs.eks_cluster_managed_security_group_id] + subnet_ids = module.vpc.outputs.private_subnet_ids + } + } + + destination { + amp { + workspace_arn = module.prometheus.outputs.workspace_arn + } + } + + scrape_configuration = var.eks_scrape_configuration +} + +module "scraper_access" { + source = "cloudposse/helm-release/aws" + version = "0.10.1" + + enabled = local.enabled + + name = length(module.this.name) > 0 ? 
module.this.name : "prometheus" + chart = "${path.module}/charts/scraper-access" + description = var.chart_description + + kubernetes_namespace = var.kubernetes_namespace + create_namespace = var.create_namespace + + verify = var.verify + wait = var.wait + atomic = var.atomic + cleanup_on_fail = var.cleanup_on_fail + timeout = var.timeout + + eks_cluster_oidc_issuer_url = replace(module.eks.outputs.eks_cluster_identity_oidc_issuer, "https://", "") + + values = compact([ + yamlencode({ + cluster_role_name = local.aps_clusterrole_identity + cluster_user_name = local.aps_clusterrole_identity + }), + yamlencode(var.chart_values), + ]) + + context = module.this.context +} diff --git a/modules/eks/prometheus-scraper/outputs.tf b/modules/eks/prometheus-scraper/outputs.tf new file mode 100644 index 000000000..55cc28502 --- /dev/null +++ b/modules/eks/prometheus-scraper/outputs.tf @@ -0,0 +1,9 @@ +output "scraper_role_arn" { + description = "The Amazon Resource Name (ARN) of the IAM role that provides permissions for the scraper to discover, collect, and produce metrics" + value = local.aps_clusterrole_username +} + +output "clusterrole_username" { + description = "The username of the ClusterRole used to give the scraper in-cluster permissions" + value = local.aps_clusterrole_identity +} diff --git a/modules/eks/prometheus-scraper/provider-helm.tf b/modules/eks/prometheus-scraper/provider-helm.tf new file mode 100644 index 000000000..64459d4f4 --- /dev/null +++ b/modules/eks/prometheus-scraper/provider-helm.tf @@ -0,0 +1,166 @@ +################## +# +# This file is a drop-in to provide a helm provider. +# +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# +# All the following variables are just about configuring the Kubernetes provider +# to be able to modify EKS cluster. The reason there are so many options is +# because at various times, each one of them has had problems, so we give you a choice. +# +# The reason there are so many "enabled" inputs rather than automatically +# detecting whether or not they are enabled based on the value of the input +# is that any logic based on input values requires the values to be known during +# the "plan" phase of Terraform, and often they are not, which causes problems. +# +variable "kubeconfig_file_enabled" { + type = bool + default = false + description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" +} + +variable "kubeconfig_file" { + type = string + default = "" + description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" +} + +variable "kubeconfig_context" { + type = string + default = "" + description = "Context to choose from the Kubernetes kube config file" +} + +variable "kube_data_auth_enabled" { + type = bool + default = false + description = <<-EOT + If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. + EOT +} + +variable "kube_exec_auth_enabled" { + type = bool + default = true + description = <<-EOT + If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. 
+ Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. + EOT +} + +variable "kube_exec_auth_role_arn" { + type = string + default = "" + description = "The role ARN for `aws eks get-token` to use" +} + +variable "kube_exec_auth_role_arn_enabled" { + type = bool + default = true + description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" +} + +variable "kube_exec_auth_aws_profile" { + type = string + default = "" + description = "The AWS config profile for `aws eks get-token` to use" +} + +variable "kube_exec_auth_aws_profile_enabled" { + type = bool + default = false + description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" +} + +variable "kubeconfig_exec_auth_api_version" { + type = string + default = "client.authentication.k8s.io/v1beta1" + description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" +} + +variable "helm_manifest_experiment_enabled" { + type = bool + default = false + description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" +} + +locals { + kubeconfig_file_enabled = var.kubeconfig_file_enabled + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + + # Eventually we might try to get this from an environment variable + kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version + + exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? [ + "--profile", var.kube_exec_auth_aws_profile + ] : [] + + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) + exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ + "--role-arn", local.kube_exec_auth_role_arn + ] : [] + + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = try(module.eks.outputs.eks_cluster_certificate_authority_data, "") + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = try(module.eks.outputs.eks_cluster_endpoint, "") +} + +data "aws_eks_cluster_auth" "eks" { + count = local.kube_data_auth_enabled ? 1 : 0 + name = local.eks_cluster_id +} + +provider "helm" { + kubernetes { + host = local.eks_cluster_endpoint + cluster_ca_certificate = base64decode(local.certificate_authority_data) + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster + # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. + config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + config_context = var.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? 
["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } + } + experiments { + manifest = var.helm_manifest_experiment_enabled && module.this.enabled + } +} + +provider "kubernetes" { + host = local.eks_cluster_endpoint + cluster_ca_certificate = base64decode(local.certificate_authority_data) + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster + # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. + config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + config_context = var.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } +} diff --git a/modules/eks/prometheus-scraper/providers.tf b/modules/eks/prometheus-scraper/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/eks/prometheus-scraper/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/eks/prometheus-scraper/remote-state.tf b/modules/eks/prometheus-scraper/remote-state.tf new file mode 100644 index 000000000..d05dbc0bc --- /dev/null +++ b/modules/eks/prometheus-scraper/remote-state.tf @@ -0,0 +1,26 @@ +module "vpc" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.vpc_component_name + + context = module.this.context +} + +module "eks" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.eks_component_name + + context = module.this.context +} + +module "prometheus" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.prometheus_component_name + + context = module.this.context +} diff --git a/modules/eks/prometheus-scraper/variables.tf b/modules/eks/prometheus-scraper/variables.tf new file mode 100644 index 000000000..26fd56db3 --- /dev/null +++ b/modules/eks/prometheus-scraper/variables.tf @@ -0,0 +1,137 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "eks_component_name" { + type = string + description = "The name of the eks component" + default = "eks/cluster" +} + +variable "vpc_component_name" { + type = string + description = "The name of the vpc component" + default = "vpc" +} + +variable "prometheus_component_name" { + type = string + description = "The name of the Amazon Managed Prometheus workspace component" + default = "managed-prometheus/workspace" +} + +variable 
"eks_scrape_configuration" { + type = string + description = "Scrape configuration for the agentless scraper that will installed with EKS integrations" + default = <<-EOT + global: + scrape_interval: 30s + scrape_configs: + # pod metrics + - job_name: pod_exporter + kubernetes_sd_configs: + - role: pod + # container metrics + - job_name: cadvisor + scheme: https + authorization: + credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + # apiserver metrics + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-apiservers + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + - action: keep + regex: default;kubernetes;https + source_labels: + - __meta_kubernetes_namespace + - __meta_kubernetes_service_name + - __meta_kubernetes_endpoint_port_name + scheme: https + # kube proxy metrics + - job_name: kube-proxy + honor_labels: true + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + source_labels: + - __meta_kubernetes_namespace + - __meta_kubernetes_pod_name + separator: '/' + regex: 'kube-system/kube-proxy.+' + - source_labels: + - __address__ + action: replace + target_label: __address__ + regex: (.+?)(\\:\\d+)? + replacement: $1:10249 + EOT +} + +variable "chart_description" { + type = string + description = "Set release description attribute (visible in the history)." + default = "AWS Managed Prometheus (AMP) scrapper roles and role bindings" +} + +variable "kubernetes_namespace" { + type = string + description = "Kubernetes namespace to install the release into" + default = "kube-system" +} + +variable "create_namespace" { + type = bool + description = "Create the Kubernetes namespace if it does not yet exist" + default = true +} + +variable "verify" { + type = bool + description = "Verify the package before installing it. Helm uses a provenance file to verify the integrity of the chart; this must be hosted alongside the chart" + default = false +} + +variable "wait" { + type = bool + description = "Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`." + default = true +} + +variable "atomic" { + type = bool + description = "If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used." + default = true +} + +variable "cleanup_on_fail" { + type = bool + description = "Allow deletion of new resources created in this upgrade when upgrade fails." + default = true +} + +variable "timeout" { + type = number + description = "Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds" + default = 300 +} + +variable "chart_values" { + type = any + description = "Additional values to yamlencode as `helm_release` values." 
+ default = {} +} diff --git a/modules/eks/prometheus-scraper/versions.tf b/modules/eks/prometheus-scraper/versions.tf new file mode 100644 index 000000000..fb8857fab --- /dev/null +++ b/modules/eks/prometheus-scraper/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.7.1, != 2.21.0" + } + } +} diff --git a/modules/eks/promtail/README.md b/modules/eks/promtail/README.md new file mode 100644 index 000000000..ecefac8bd --- /dev/null +++ b/modules/eks/promtail/README.md @@ -0,0 +1,131 @@ +--- +tags: + - component/eks/promtail + - layer/grafana + - provider/aws + - provider/helm +--- + +# Component: `eks/promtail` + +Promtail is an agent which ships the contents of local logs to a Loki instance. + +This component deploys the [grafana/promtail](https://github.com/grafana/helm-charts/tree/main/charts/promtail) helm +chart and expects `eks/loki` to be deployed. + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. + +```yaml +components: + terraform: + eks/promtail: + vars: + enabled: true + name: promtail +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [helm](#requirement\_helm) | >= 2.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.7.1, != 2.21.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [alb\_controller\_ingress\_group](#module\_alb\_controller\_ingress\_group) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [chart\_values](#module\_chart\_values) | cloudposse/config/yaml//modules/deepmerge | 1.0.2 | +| [dns\_gbl\_delegated](#module\_dns\_gbl\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [loki](#module\_loki) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [promtail](#module\_promtail) | cloudposse/helm-release/aws | 0.10.1 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [aws_ssm_parameter.basic_auth_password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [alb\_controller\_ingress\_group\_component\_name](#input\_alb\_controller\_ingress\_group\_component\_name) | The name of the eks/alb-controller-ingress-group component. This should be an internal facing ALB | `string` | `"eks/alb-controller-ingress-group"` | no | +| [atomic](#input\_atomic) | If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used. | `bool` | `true` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [chart](#input\_chart) | Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended. | `string` | `"promtail"` | no | +| [chart\_description](#input\_chart\_description) | Set release description attribute (visible in the history). | `string` | `"Promtail is an agent which ships the contents of local logs to a Loki instance"` | no | +| [chart\_repository](#input\_chart\_repository) | Repository URL where to locate the requested chart. | `string` | `"https://grafana.github.io/helm-charts"` | no | +| [chart\_values](#input\_chart\_values) | Additional values to yamlencode as `helm_release` values. | `any` | `{}` | no | +| [chart\_version](#input\_chart\_version) | Specify the exact chart version to install. If this is not specified, the latest version is installed. | `string` | `null` | no | +| [cleanup\_on\_fail](#input\_cleanup\_on\_fail) | Allow deletion of new resources created in this upgrade when upgrade fails. | `bool` | `true` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [create\_namespace](#input\_create\_namespace) | Create the Kubernetes namespace if it does not yet exist | `bool` | `true` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can be seen in the plan | `bool` | `false` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | +| [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | +| [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | +| [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | +| [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | +| [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | +| [kubernetes\_namespace](#input\_kubernetes\_namespace) | Kubernetes namespace to install the release into | `string` | `"monitoring"` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [loki\_component\_name](#input\_loki\_component\_name) | The name of the eks/loki component | `string` | `"eks/loki"` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [push\_api](#input\_push\_api) | Describes and configures Promtail to expose a Loki push API server with an Ingress configuration.

- enabled: Set this to `true` to enable this feature
- scrape\_config: Optional. This component includes a basic scrape configuration by default; override it here if needed. |
object({
enabled = optional(bool, false)
scrape_config = optional(string, "")
})
| `{}` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [scrape\_configs](#input\_scrape\_configs) | A list of local path paths starting with this component's base path for Promtail Scrape Configs | `list(string)` |
[
"scrape_config/default_kubernetes_pods.yaml"
]
| no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [timeout](#input\_timeout) | Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds | `number` | `300` | no | +| [verify](#input\_verify) | Verify the package before installing it. Helm uses a provenance file to verify the integrity of the chart; this must be hosted alongside the chart | `bool` | `false` | no | +| [wait](#input\_wait) | Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`. | `bool` | `true` | no | + +## Outputs + +No outputs. + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/eks/promtail) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/eks/promtail/context.tf b/modules/eks/promtail/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/eks/promtail/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/eks/promtail/main.tf b/modules/eks/promtail/main.tf new file mode 100644 index 000000000..954ebbeeb --- /dev/null +++ b/modules/eks/promtail/main.tf @@ -0,0 +1,136 @@ +locals { + enabled = module.this.enabled + name = length(module.this.name) > 0 ? module.this.name : "promtail" + + # Assume basic auth is enabled if the loki component has a basic auth username output + basic_auth_enabled = local.enabled && length(module.loki.outputs.basic_auth_username) > 0 + + # These are the default values required to connect to eks/loki in the same namespace + loki_write_chart_values = { + config = { + clients = [ + { + # Intentionally choose the loki-write service not loki-gateway. Loki gateway is disabled + url = "http://loki-write:3100/loki/api/v1/push" + tenant_id = "1" + basic_auth = local.basic_auth_enabled ? { + username = module.loki.outputs.basic_auth_username + password = data.aws_ssm_parameter.basic_auth_password[0].value + } : {} + } + ] + } + } + + # These are optional values used to expose an endpoint for the Push API + # https://grafana.com/docs/loki/latest/send-data/promtail/configuration/#loki_push_api + push_api_enabled = local.enabled && var.push_api.enabled + ingress_host_name = local.push_api_enabled ? format("%s.%s.%s", local.name, module.this.environment, module.dns_gbl_delegated[0].outputs.default_domain_name) : "" + ingress_group_name = local.push_api_enabled ? module.alb_controller_ingress_group[0].outputs.group_name : "" + default_push_api_scrape_config = <<-EOT + - job_name: push + loki_push_api: + server: + http_listen_port: 3500 + grpc_listen_port: 3600 + labels: + push: default + EOT + push_api_chart_values = { + config = { + snippets = { + extraScrapeConfigs = length(var.push_api.scrape_config) > 0 ? 
var.push_api.scrape_config : local.default_push_api_scrape_config + } + } + extraPorts = { + push = { + name = "push" + containerPort = "3500" + protocol = "TCP" + service = { + type = "ClusterIP" + port = "3500" + } + ingress = { + annotations = { + "kubernetes.io/ingress.class" = "alb" + "external-dns.alpha.kubernetes.io/hostname" = local.ingress_host_name + "alb.ingress.kubernetes.io/group.name" = local.ingress_group_name + "alb.ingress.kubernetes.io/backend-protocol" = "HTTP" + "alb.ingress.kubernetes.io/listen-ports" = "[{\"HTTP\": 80},{\"HTTPS\":443}]" + "alb.ingress.kubernetes.io/ssl-redirect" = "443" + "alb.ingress.kubernetes.io/target-type" = "ip" + } + hosts = [ + local.ingress_host_name + ] + tls = [ + { + secretName = "${module.this.id}-tls" + hosts = [local.ingress_host_name] + } + ] + } + } + } + } + + scrape_config = join("\n", [for scrape_config_file in var.scrape_configs : file("${path.module}/${scrape_config_file}")]) + scrape_config_chart_values = { + config = { + snippets = { + scrapeConfigs = local.scrape_config + } + } + } +} + +data "aws_ssm_parameter" "basic_auth_password" { + count = local.basic_auth_enabled ? 1 : 0 + + name = module.loki.outputs.ssm_path_basic_auth_password +} + +module "chart_values" { + source = "cloudposse/config/yaml//modules/deepmerge" + version = "1.0.2" + + count = local.enabled ? 1 : 0 + + maps = [ + local.loki_write_chart_values, + jsondecode(local.push_api_enabled ? jsonencode(local.push_api_chart_values) : jsonencode({})), + local.scrape_config_chart_values, + var.chart_values + ] +} + +module "promtail" { + source = "cloudposse/helm-release/aws" + version = "0.10.1" + + enabled = local.enabled + + name = local.name + chart = var.chart + description = var.chart_description + repository = var.chart_repository + chart_version = var.chart_version + + kubernetes_namespace = var.kubernetes_namespace + create_namespace = var.create_namespace + + verify = var.verify + wait = var.wait + atomic = var.atomic + cleanup_on_fail = var.cleanup_on_fail + timeout = var.timeout + + eks_cluster_oidc_issuer_url = replace(module.eks.outputs.eks_cluster_identity_oidc_issuer, "https://", "") + + values = compact([ + yamlencode(module.chart_values[0].merged), + ]) + + context = module.this.context +} diff --git a/modules/eks/promtail/outputs.tf b/modules/eks/promtail/outputs.tf new file mode 100644 index 000000000..e69de29bb diff --git a/modules/eks/promtail/provider-helm.tf b/modules/eks/promtail/provider-helm.tf new file mode 100644 index 000000000..64459d4f4 --- /dev/null +++ b/modules/eks/promtail/provider-helm.tf @@ -0,0 +1,166 @@ +################## +# +# This file is a drop-in to provide a helm provider. +# +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# +# All the following variables are just about configuring the Kubernetes provider +# to be able to modify EKS cluster. The reason there are so many options is +# because at various times, each one of them has had problems, so we give you a choice. +# +# The reason there are so many "enabled" inputs rather than automatically +# detecting whether or not they are enabled based on the value of the input +# is that any logic based on input values requires the values to be known during +# the "plan" phase of Terraform, and often they are not, which causes problems. 
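For readers deciding among the authentication options described above: the choice is normally expressed per component in stack configuration. The sketch below is illustrative only and assumes an Atmos-style stack layout; the variable names are the ones declared in this file, while the component path and concrete values are hypothetical.

```yaml
# Hedged example: only the variable names come from provider-helm.tf below;
# the component path and values are illustrative.
components:
  terraform:
    eks/promtail:
      vars:
        # Default path: authenticate via `aws eks get-token`, assuming the
        # Terraform role resolved by module.iam_roles.
        kube_exec_auth_enabled: true
        kube_exec_auth_role_arn_enabled: true
        # Alternative: authenticate with a local kubeconfig file instead.
        # kubeconfig_file_enabled: true
        # kubeconfig_file: /path/to/kubeconfig
        # Alternative: use a token from the aws_eks_cluster_auth data source.
        # kube_data_auth_enabled: true
```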
+# +variable "kubeconfig_file_enabled" { + type = bool + default = false + description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" +} + +variable "kubeconfig_file" { + type = string + default = "" + description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" +} + +variable "kubeconfig_context" { + type = string + default = "" + description = "Context to choose from the Kubernetes kube config file" +} + +variable "kube_data_auth_enabled" { + type = bool + default = false + description = <<-EOT + If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. + EOT +} + +variable "kube_exec_auth_enabled" { + type = bool + default = true + description = <<-EOT + If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. + EOT +} + +variable "kube_exec_auth_role_arn" { + type = string + default = "" + description = "The role ARN for `aws eks get-token` to use" +} + +variable "kube_exec_auth_role_arn_enabled" { + type = bool + default = true + description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" +} + +variable "kube_exec_auth_aws_profile" { + type = string + default = "" + description = "The AWS config profile for `aws eks get-token` to use" +} + +variable "kube_exec_auth_aws_profile_enabled" { + type = bool + default = false + description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" +} + +variable "kubeconfig_exec_auth_api_version" { + type = string + default = "client.authentication.k8s.io/v1beta1" + description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" +} + +variable "helm_manifest_experiment_enabled" { + type = bool + default = false + description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" +} + +locals { + kubeconfig_file_enabled = var.kubeconfig_file_enabled + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + + # Eventually we might try to get this from an environment variable + kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version + + exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? [ + "--profile", var.kube_exec_auth_aws_profile + ] : [] + + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) + exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ + "--role-arn", local.kube_exec_auth_role_arn + ] : [] + + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = try(module.eks.outputs.eks_cluster_certificate_authority_data, "") + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = try(module.eks.outputs.eks_cluster_endpoint, "") +} + +data "aws_eks_cluster_auth" "eks" { + count = local.kube_data_auth_enabled ? 
1 : 0 + name = local.eks_cluster_id +} + +provider "helm" { + kubernetes { + host = local.eks_cluster_endpoint + cluster_ca_certificate = base64decode(local.certificate_authority_data) + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster + # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. + config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + config_context = var.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } + } + experiments { + manifest = var.helm_manifest_experiment_enabled && module.this.enabled + } +} + +provider "kubernetes" { + host = local.eks_cluster_endpoint + cluster_ca_certificate = base64decode(local.certificate_authority_data) + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster + # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. + config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + config_context = var.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } +} diff --git a/modules/eks/promtail/providers.tf b/modules/eks/promtail/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/eks/promtail/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/eks/promtail/remote-state.tf b/modules/eks/promtail/remote-state.tf new file mode 100644 index 000000000..391ae4624 --- /dev/null +++ b/modules/eks/promtail/remote-state.tf @@ -0,0 +1,58 @@ +variable "eks_component_name" { + type = string + description = "The name of the eks component" + default = "eks/cluster" +} + +module "eks" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.eks_component_name + + context = module.this.context +} + +variable "loki_component_name" { + type = string + description = "The name of the eks/loki component" + default = "eks/loki" +} + +module "loki" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.loki_component_name + + context = module.this.context +} + +variable "alb_controller_ingress_group_component_name" { + type = string + description = "The name of the eks/alb-controller-ingress-group component. This should be an internal facing ALB" + default = "eks/alb-controller-ingress-group" +} + +module "alb_controller_ingress_group" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + count = local.push_api_enabled ? 1 : 0 + + component = var.alb_controller_ingress_group_component_name + + context = module.this.context +} + +module "dns_gbl_delegated" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + count = local.push_api_enabled ? 1 : 0 + + environment = "gbl" + component = "dns-delegated" + + context = module.this.context +} diff --git a/modules/eks/promtail/scrape_config/default_kubernetes_pods.yaml b/modules/eks/promtail/scrape_config/default_kubernetes_pods.yaml new file mode 100644 index 000000000..23e58bc6c --- /dev/null +++ b/modules/eks/promtail/scrape_config/default_kubernetes_pods.yaml @@ -0,0 +1,40 @@ +# See also https://github.com/grafana/loki/blob/master/production/ksonnet/promtail/scrape_config.libsonnet for reference +- job_name: kubernetes-pods + pipeline_stages: + {{- toYaml .Values.config.snippets.pipelineStages | nindent 4 }} + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_controller_name + regex: ([0-9a-z-.]+?)(-[0-9a-f]{8,10})? + action: replace + target_label: __tmp_controller_name + - source_labels: + - __meta_kubernetes_pod_label_app_kubernetes_io_name + - __meta_kubernetes_pod_label_app + - __tmp_controller_name + - __meta_kubernetes_pod_name + regex: ^;*([^;]+)(;.*)?$ + action: replace + target_label: app + - source_labels: + - __meta_kubernetes_pod_label_app_kubernetes_io_instance + - __meta_kubernetes_pod_label_instance + regex: ^;*([^;]+)(;.*)?$ + action: replace + target_label: instance + - source_labels: + - __meta_kubernetes_pod_label_app_kubernetes_io_component + - __meta_kubernetes_pod_label_component + regex: ^;*([^;]+)(;.*)?$ + action: replace + target_label: component + {{- if .Values.config.snippets.addScrapeJobLabel }} + - replacement: kubernetes-pods + target_label: scrape_job + {{- end }} + {{- toYaml .Values.config.snippets.common | nindent 4 }} + {{- with .Values.config.snippets.extraRelabelConfigs }} + {{- toYaml . 
| nindent 4 }} + {{- end }} diff --git a/modules/eks/promtail/variables.tf b/modules/eks/promtail/variables.tf new file mode 100644 index 000000000..9acae3479 --- /dev/null +++ b/modules/eks/promtail/variables.tf @@ -0,0 +1,99 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "chart_description" { + type = string + description = "Set release description attribute (visible in the history)." + default = "Promtail is an agent which ships the contents of local logs to a Loki instance" +} + +variable "chart" { + type = string + description = "Chart name to be installed. The chart name can be local path, a URL to a chart, or the name of the chart if `repository` is specified. It is also possible to use the `/` format here if you are running Terraform on a system that the repository has been added to with `helm repo add` but this is not recommended." + default = "promtail" +} + +variable "chart_repository" { + type = string + description = "Repository URL where to locate the requested chart." + default = "https://grafana.github.io/helm-charts" +} + +variable "chart_version" { + type = string + description = "Specify the exact chart version to install. If this is not specified, the latest version is installed." + default = null +} + +variable "kubernetes_namespace" { + type = string + description = "Kubernetes namespace to install the release into" + default = "monitoring" +} + +variable "create_namespace" { + type = bool + description = "Create the Kubernetes namespace if it does not yet exist" + default = true +} + +variable "verify" { + type = bool + description = "Verify the package before installing it. Helm uses a provenance file to verify the integrity of the chart; this must be hosted alongside the chart" + default = false +} + +variable "wait" { + type = bool + description = "Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout`. Defaults to `true`." + default = true +} + +variable "atomic" { + type = bool + description = "If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used." + default = true +} + +variable "cleanup_on_fail" { + type = bool + description = "Allow deletion of new resources created in this upgrade when upgrade fails." + default = true +} + +variable "timeout" { + type = number + description = "Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). Defaults to `300` seconds" + default = 300 +} + +variable "chart_values" { + type = any + description = "Additional values to yamlencode as `helm_release` values." + default = {} +} + +variable "push_api" { + type = object({ + enabled = optional(bool, false) + scrape_config = optional(string, "") + }) + description = <<-EOT + Describes and configures Promtail to expose a Loki push API server with an Ingress configuration. + + - enabled: Set this to `true` to enable this feature + - scrape_config: Optional. This component includes a basic configuration by default, or override the default configuration here. 
+ + EOT + default = {} +} + +variable "scrape_configs" { + type = list(string) + description = "A list of local path paths starting with this component's base path for Promtail Scrape Configs" + default = [ + "scrape_config/default_kubernetes_pods.yaml" + ] +} diff --git a/modules/eks/promtail/versions.tf b/modules/eks/promtail/versions.tf new file mode 100644 index 000000000..fb8857fab --- /dev/null +++ b/modules/eks/promtail/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.7.1, != 2.21.0" + } + } +} diff --git a/modules/eks/redis-operator/README.md b/modules/eks/redis-operator/README.md index 28768a7f5..0504d982b 100644 --- a/modules/eks/redis-operator/README.md +++ b/modules/eks/redis-operator/README.md @@ -1,6 +1,16 @@ +--- +tags: + - component/eks/redis-operator + - layer/eks + - layer/data + - provider/aws + - provider/helm +--- + # Component: `eks/redis-operator` -This component installs `redis-operator` for EKS clusters. Redis Operator creates/configures/manages high availability redis with sentinel automatic failover atop Kubernetes. +This component installs `redis-operator` for EKS clusters. Redis Operator creates/configures/manages high availability +redis with sentinel automatic failover atop Kubernetes. ## Usage @@ -46,7 +56,6 @@ components: image: repository: quay.io/spotahome/redis-operator tag: v1.1.1 - ``` `stacks/catalog/eks/redis-operator/dev` file (derived component for "dev" specific settings): @@ -63,31 +72,33 @@ components: inherits: - eks/redis-operator/defaults vars: {} - ``` + + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | | [helm](#requirement\_helm) | >= 2.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0, != 2.21.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | -| [kubernetes](#provider\_kubernetes) | n/a | +| [aws](#provider\_aws) | >= 4.0 | +| [kubernetes](#provider\_kubernetes) | >= 2.0, != 2.21.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | -| [redis\_operator](#module\_redis\_operator) | cloudposse/helm-release/aws | 0.5.0 | +| [redis\_operator](#module\_redis\_operator) | cloudposse/helm-release/aws | 0.10.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources @@ -117,17 +128,16 @@ components: | [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 
'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `true` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.<br>
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | | [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | | [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | | [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | -| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | | [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | | [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | | [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | @@ -154,7 +164,9 @@ components: |------|-------------| | [metadata](#output\_metadata) | Block status of the deployed release | + ## References - * [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/eks/redis-operator) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/eks/redis-operator) - + Cloud Posse's upstream component diff --git a/modules/eks/redis-operator/default.auto.tfvars b/modules/eks/redis-operator/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/eks/redis-operator/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/eks/redis-operator/main.tf b/modules/eks/redis-operator/main.tf index a91e0e8f8..ec1793768 100644 --- a/modules/eks/redis-operator/main.tf +++ b/modules/eks/redis-operator/main.tf @@ -14,7 +14,7 @@ resource "kubernetes_namespace" "default" { module "redis_operator" { source = "cloudposse/helm-release/aws" - version = "0.5.0" + version = "0.10.0" chart = var.chart repository = var.chart_repository diff --git a/modules/eks/redis-operator/provider-helm.tf b/modules/eks/redis-operator/provider-helm.tf index 20e4d3837..91cc7f6d4 100644 --- a/modules/eks/redis-operator/provider-helm.tf +++ b/modules/eks/redis-operator/provider-helm.tf @@ -2,6 +2,12 @@ # # This file is a drop-in to provide a helm provider. # +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# # All the following variables are just about configuring the Kubernetes provider # to be able to modify EKS cluster. The reason there are so many options is # because at various times, each one of them has had problems, so we give you a choice. @@ -15,18 +21,35 @@ variable "kubeconfig_file_enabled" { type = bool default = false description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false } variable "kubeconfig_file" { type = string default = "" description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false } variable "kubeconfig_context" { type = string default = "" - description = "Context to choose from the Kubernetes kube config file" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. 
+ EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false } variable "kube_data_auth_enabled" { @@ -36,6 +59,7 @@ variable "kube_data_auth_enabled" { If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_enabled" { @@ -45,48 +69,62 @@ variable "kube_exec_auth_enabled" { If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_role_arn" { type = string default = "" description = "The role ARN for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_role_arn_enabled" { type = bool default = true description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false } variable "kube_exec_auth_aws_profile" { type = string default = "" description = "The AWS config profile for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_aws_profile_enabled" { type = bool default = false description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false } variable "kubeconfig_exec_auth_api_version" { type = string default = "client.authentication.k8s.io/v1beta1" description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false } variable "helm_manifest_experiment_enabled" { type = bool - default = true + default = false description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false } locals { kubeconfig_file_enabled = var.kubeconfig_file_enabled - kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled - kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled # Eventually we might try to get this from an environment variable kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version @@ -95,14 +133,17 @@ locals { "--profile", var.kube_exec_auth_aws_profile ] : [] - kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, var.import_role_arn, module.iam_roles.terraform_role_arn) + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? 
[ "--role-arn", local.kube_exec_auth_role_arn ] : [] - certificate_authority_data = module.eks.outputs.eks_cluster_certificate_authority_data - eks_cluster_id = module.eks.outputs.eks_cluster_id - eks_cluster_endpoint = module.eks.outputs.eks_cluster_endpoint + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") } data "aws_eks_cluster_auth" "eks" { @@ -113,15 +154,16 @@ data "aws_eks_cluster_auth" "eks" { provider "helm" { kubernetes { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) - token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" @@ -132,21 +174,22 @@ provider "helm" { } } experiments { - manifest = var.helm_manifest_experiment_enabled + manifest = var.helm_manifest_experiment_enabled && module.this.enabled } } provider "kubernetes" { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) - token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. 
+ config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" diff --git a/modules/eks/redis-operator/providers.tf b/modules/eks/redis-operator/providers.tf index 74ff8e62c..89ed50a98 100644 --- a/modules/eks/redis-operator/providers.tf +++ b/modules/eks/redis-operator/providers.tf @@ -1,11 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -14,15 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/eks/redis-operator/remote-state.tf b/modules/eks/redis-operator/remote-state.tf index 6ef90fd26..c1ec8226d 100644 --- a/modules/eks/redis-operator/remote-state.tf +++ b/modules/eks/redis-operator/remote-state.tf @@ -1,6 +1,6 @@ module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = var.eks_component_name diff --git a/modules/eks/redis-operator/versions.tf b/modules/eks/redis-operator/versions.tf index 58318d20e..14c085342 100644 --- a/modules/eks/redis-operator/versions.tf +++ b/modules/eks/redis-operator/versions.tf @@ -4,11 +4,15 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } helm = { source = "hashicorp/helm" version = ">= 2.0" } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0, != 2.21.0" + } } } diff --git a/modules/eks/redis/README.md b/modules/eks/redis/README.md index 7435b4d93..6bf7feac3 100644 --- a/modules/eks/redis/README.md +++ b/modules/eks/redis/README.md @@ -1,3 +1,11 @@ +--- +tags: + - component/eks/redis + - layer/data + - provider/aws + - provider/helm +--- + # Component: `eks/redis` This component installs `redis` for EKS clusters. This is a Self Hosted Redis Cluster installed on EKS. @@ -8,7 +16,6 @@ This component installs `redis` for EKS clusters. This is a Self Hosted Redis Cl Use this in the catalog or use these variables to overwrite the catalog values. 
- `stacks/catalog/eks/redis/defaults` file (base component for default Redis settings): ```yaml @@ -51,7 +58,6 @@ components: # Disabling Manifest Experiment disables stored metadata with Terraform state # Otherwise, the state will show changes on all plans helm_manifest_experiment_enabled: false - ``` `stacks/catalog/eks/redis/dev` file (derived component for "dev" specific settings): @@ -68,32 +74,33 @@ components: inherits: - eks/redis/defaults vars: {} - ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | | [helm](#requirement\_helm) | >= 2.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0, != 2.21.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | -| [kubernetes](#provider\_kubernetes) | n/a | +| [aws](#provider\_aws) | >= 4.0 | +| [kubernetes](#provider\_kubernetes) | >= 2.0, != 2.21.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | -| [redis](#module\_redis) | cloudposse/helm-release/aws | 0.5.0 | +| [redis](#module\_redis) | cloudposse/helm-release/aws | 0.10.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources @@ -123,17 +130,16 @@ components: | [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `true` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.<br>
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | | [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | | [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | | [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | -| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | | [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | | [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | | [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | @@ -160,7 +166,9 @@ components: |------|-------------| | [metadata](#output\_metadata) | Block status of the deployed release | + ## References - * [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/eks/redis) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/eks/redis) - + Cloud Posse's upstream component diff --git a/modules/eks/redis/default.auto.tfvars b/modules/eks/redis/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/eks/redis/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/eks/redis/main.tf b/modules/eks/redis/main.tf index f612b45a8..6e6f64305 100644 --- a/modules/eks/redis/main.tf +++ b/modules/eks/redis/main.tf @@ -14,7 +14,7 @@ resource "kubernetes_namespace" "default" { module "redis" { source = "cloudposse/helm-release/aws" - version = "0.5.0" + version = "0.10.0" chart = var.chart repository = var.chart_repository diff --git a/modules/eks/redis/provider-helm.tf b/modules/eks/redis/provider-helm.tf index 20e4d3837..91cc7f6d4 100644 --- a/modules/eks/redis/provider-helm.tf +++ b/modules/eks/redis/provider-helm.tf @@ -2,6 +2,12 @@ # # This file is a drop-in to provide a helm provider. # +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# # All the following variables are just about configuring the Kubernetes provider # to be able to modify EKS cluster. The reason there are so many options is # because at various times, each one of them has had problems, so we give you a choice. @@ -15,18 +21,35 @@ variable "kubeconfig_file_enabled" { type = bool default = false description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false } variable "kubeconfig_file" { type = string default = "" description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false } variable "kubeconfig_context" { type = string default = "" - description = "Context to choose from the Kubernetes kube config file" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. 
+ EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false } variable "kube_data_auth_enabled" { @@ -36,6 +59,7 @@ variable "kube_data_auth_enabled" { If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_enabled" { @@ -45,48 +69,62 @@ variable "kube_exec_auth_enabled" { If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_role_arn" { type = string default = "" description = "The role ARN for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_role_arn_enabled" { type = bool default = true description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false } variable "kube_exec_auth_aws_profile" { type = string default = "" description = "The AWS config profile for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_aws_profile_enabled" { type = bool default = false description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false } variable "kubeconfig_exec_auth_api_version" { type = string default = "client.authentication.k8s.io/v1beta1" description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false } variable "helm_manifest_experiment_enabled" { type = bool - default = true + default = false description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false } locals { kubeconfig_file_enabled = var.kubeconfig_file_enabled - kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled - kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled # Eventually we might try to get this from an environment variable kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version @@ -95,14 +133,17 @@ locals { "--profile", var.kube_exec_auth_aws_profile ] : [] - kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, var.import_role_arn, module.iam_roles.terraform_role_arn) + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? 
[ "--role-arn", local.kube_exec_auth_role_arn ] : [] - certificate_authority_data = module.eks.outputs.eks_cluster_certificate_authority_data - eks_cluster_id = module.eks.outputs.eks_cluster_id - eks_cluster_endpoint = module.eks.outputs.eks_cluster_endpoint + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") } data "aws_eks_cluster_auth" "eks" { @@ -113,15 +154,16 @@ data "aws_eks_cluster_auth" "eks" { provider "helm" { kubernetes { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) - token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" @@ -132,21 +174,22 @@ provider "helm" { } } experiments { - manifest = var.helm_manifest_experiment_enabled + manifest = var.helm_manifest_experiment_enabled && module.this.enabled } } provider "kubernetes" { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) - token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. 
+ config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" diff --git a/modules/eks/redis/providers.tf b/modules/eks/redis/providers.tf index 74ff8e62c..89ed50a98 100644 --- a/modules/eks/redis/providers.tf +++ b/modules/eks/redis/providers.tf @@ -1,11 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -14,15 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/eks/redis/remote-state.tf b/modules/eks/redis/remote-state.tf index 6ef90fd26..c1ec8226d 100644 --- a/modules/eks/redis/remote-state.tf +++ b/modules/eks/redis/remote-state.tf @@ -1,6 +1,6 @@ module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = var.eks_component_name diff --git a/modules/eks/redis/versions.tf b/modules/eks/redis/versions.tf index 58318d20e..14c085342 100644 --- a/modules/eks/redis/versions.tf +++ b/modules/eks/redis/versions.tf @@ -4,11 +4,15 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } helm = { source = "hashicorp/helm" version = ">= 2.0" } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0, != 2.21.0" + } } } diff --git a/modules/eks/reloader/README.md b/modules/eks/reloader/README.md index f921e50af..9e720e55f 100644 --- a/modules/eks/reloader/README.md +++ b/modules/eks/reloader/README.md @@ -1,8 +1,15 @@ +--- +tags: + - component/eks/reloader + - layer/eks + - provider/aws + - provider/helm +--- + # Component: `eks/reloader` -This component installs the [Stakater Reloader](https://github.com/stakater/Reloader) for EKS clusters. -`reloader` can watch `ConfigMap`s and `Secret`s for changes -and use these to trigger rolling upgrades on pods and their associated +This component installs the [Stakater Reloader](https://github.com/stakater/Reloader) for EKS clusters. `reloader` can +watch `ConfigMap`s and `Secret`s for changes and use these to trigger rolling upgrades on pods and their associated `DeploymentConfig`s, `Deployment`s, `Daemonset`s `Statefulset`s and `Rollout`s. 
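For context, the controller only becomes useful once workloads opt in through Reloader's annotations. A minimal sketch of a Deployment using the auto-reload annotation documented upstream by Stakater follows; all resource and image names here are hypothetical:

```yaml
# Illustrative only: the annotation key is the one documented by Stakater Reloader;
# the Deployment, ConfigMap, and image names are made up for this example.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-app
  annotations:
    reloader.stakater.com/auto: "true" # roll the Deployment when referenced ConfigMaps/Secrets change
spec:
  replicas: 1
  selector:
    matchLabels:
      app: example-app
  template:
    metadata:
      labels:
        app: example-app
    spec:
      containers:
        - name: app
          image: example/app:latest
          envFrom:
            - configMapRef:
                name: example-app-config
```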
## Usage @@ -29,6 +36,7 @@ components: timeout: 180 ``` + ## Requirements @@ -37,7 +45,7 @@ components: | [terraform](#requirement\_terraform) | >= 1.0.0 | | [aws](#requirement\_aws) | >= 4.9.0 | | [helm](#requirement\_helm) | >= 2.0 | -| [kubernetes](#requirement\_kubernetes) | >= 2.7.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.7.1, != 2.21.0 | ## Providers @@ -45,13 +53,13 @@ components: |------|---------| | [aws](#provider\_aws) | >= 4.9.0 | | [helm](#provider\_helm) | >= 2.0 | -| [kubernetes](#provider\_kubernetes) | >= 2.7.1 | +| [kubernetes](#provider\_kubernetes) | >= 2.7.1, != 2.21.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.3.1 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -81,17 +89,16 @@ components: | [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `true` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | | [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | | [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | | [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | -| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | | [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | | [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | | [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | @@ -118,7 +125,9 @@ components: |------|-------------| | [metadata](#output\_metadata) | Block status of the deployed release | + ## References -* https://github.com/stakater/Reloader -* https://github.com/stakater/Reloader/tree/master/deployments/kubernetes/chart/reloader + +- https://github.com/stakater/Reloader +- https://github.com/stakater/Reloader/tree/master/deployments/kubernetes/chart/reloader diff --git a/modules/eks/reloader/provider-helm.tf b/modules/eks/reloader/provider-helm.tf index 20e4d3837..91cc7f6d4 100644 --- a/modules/eks/reloader/provider-helm.tf +++ b/modules/eks/reloader/provider-helm.tf @@ -2,6 +2,12 @@ # # This file is a drop-in to provide a helm provider. # +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# # All the following variables are just about configuring the Kubernetes provider # to be able to modify EKS cluster. The reason there are so many options is # because at various times, each one of them has had problems, so we give you a choice. @@ -15,18 +21,35 @@ variable "kubeconfig_file_enabled" { type = bool default = false description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false } variable "kubeconfig_file" { type = string default = "" description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false } variable "kubeconfig_context" { type = string default = "" - description = "Context to choose from the Kubernetes kube config file" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false } variable "kube_data_auth_enabled" { @@ -36,6 +59,7 @@ variable "kube_data_auth_enabled" { If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_enabled" { @@ -45,48 +69,62 @@ variable "kube_exec_auth_enabled" { If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. 
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_role_arn" { type = string default = "" description = "The role ARN for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_role_arn_enabled" { type = bool default = true description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false } variable "kube_exec_auth_aws_profile" { type = string default = "" description = "The AWS config profile for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_aws_profile_enabled" { type = bool default = false description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false } variable "kubeconfig_exec_auth_api_version" { type = string default = "client.authentication.k8s.io/v1beta1" description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false } variable "helm_manifest_experiment_enabled" { type = bool - default = true + default = false description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false } locals { kubeconfig_file_enabled = var.kubeconfig_file_enabled - kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled - kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled # Eventually we might try to get this from an environment variable kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version @@ -95,14 +133,17 @@ locals { "--profile", var.kube_exec_auth_aws_profile ] : [] - kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, var.import_role_arn, module.iam_roles.terraform_role_arn) + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ "--role-arn", local.kube_exec_auth_role_arn ] : [] - certificate_authority_data = module.eks.outputs.eks_cluster_certificate_authority_data - eks_cluster_id = module.eks.outputs.eks_cluster_id - eks_cluster_endpoint = module.eks.outputs.eks_cluster_endpoint + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? 
null : try(module.eks.outputs.eks_cluster_endpoint, "") } data "aws_eks_cluster_auth" "eks" { @@ -113,15 +154,16 @@ data "aws_eks_cluster_auth" "eks" { provider "helm" { kubernetes { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) - token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" @@ -132,21 +174,22 @@ provider "helm" { } } experiments { - manifest = var.helm_manifest_experiment_enabled + manifest = var.helm_manifest_experiment_enabled && module.this.enabled } } provider "kubernetes" { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) - token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" diff --git a/modules/eks/reloader/providers.tf b/modules/eks/reloader/providers.tf index c2419aabb..89ed50a98 100644 --- a/modules/eks/reloader/providers.tf +++ b/modules/eks/reloader/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. 
+ profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/eks/reloader/remote-state.tf b/modules/eks/reloader/remote-state.tf index 90c6ab1a8..c1ec8226d 100644 --- a/modules/eks/reloader/remote-state.tf +++ b/modules/eks/reloader/remote-state.tf @@ -1,6 +1,6 @@ module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" + version = "1.5.0" component = var.eks_component_name diff --git a/modules/eks/reloader/versions.tf b/modules/eks/reloader/versions.tf index c8087b1b8..61ea676a2 100644 --- a/modules/eks/reloader/versions.tf +++ b/modules/eks/reloader/versions.tf @@ -12,7 +12,7 @@ terraform { } kubernetes = { source = "hashicorp/kubernetes" - version = ">= 2.7.1" + version = ">= 2.7.1, != 2.21.0" } } } diff --git a/modules/eks/storage-class/README.md b/modules/eks/storage-class/README.md new file mode 100644 index 000000000..a9c64d06e --- /dev/null +++ b/modules/eks/storage-class/README.md @@ -0,0 +1,216 @@ +--- +tags: + - component/eks + - layer/eks + - layer/data + - provider/aws + - provider/helm +--- + +# Component: `eks/storage-class` + +This component is responsible for provisioning `StorageClasses` in an EKS cluster. See the list of guides and references +linked at the bottom of this README for more information. + +A StorageClass provides part of the configuration for a PersistentVolumeClaim, which copies the configuration when it is +created. Thus, you can delete a StorageClass without affecting existing PersistentVolumeClaims, and changes to a +StorageClass do not propagate to existing PersistentVolumeClaims. + +## Usage + +**Stack Level**: Regional, per cluster + +This component can create storage classes backed by EBS or EFS, and is intended to be used with the corresponding EKS +add-ons `aws-ebs-csi-driver` and `aws-efs-csi-driver` respectively. In the case of EFS, this component also requires +that you have provisioned an EFS filesystem in the same region as your cluster, and expects you have used the `efs` +(previously `eks/efs`) component to do so. The EFS storage classes will get the file system ID from the EFS component's +output. + +### Note: Default Storage Class + +Exactly one StorageClass can be designated as the default StorageClass for a cluster. This default StorageClass is then +used by PersistentVolumeClaims that do not specify a storage class. + +Prior to Kubernetes 1.26, if more than one StorageClass is marked as default, a PersistentVolumeClaim without +`storageClassName` explicitly specified cannot be created. In Kubernetes 1.26 and later, if more than one StorageClass +is marked as default, the last one created will be used, which means you can get by with just ignoring the default "gp2" +StorageClass that EKS creates for you. 
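To make the distinction concrete (the claim name, class name, and size below are illustrative), a `PersistentVolumeClaim` only falls back to the cluster's default StorageClass when `storageClassName` is omitted; naming the class explicitly sidesteps any question of which class is currently the default:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-data          # illustrative name
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: gp3       # omit this field to use the default StorageClass instead
  resources:
    requests:
      storage: 10Gi
```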
+ +EKS always creates a default storage class for the cluster, typically an EBS backed class named `gp2`. Find out what the +default storage class is for your cluster by running this command: + +```bash +# You only need to run `set-cluster` when you are changing target clusters +set-cluster admin # replace admin with other role name if desired +kubectl get storageclass +``` + +This will list the available storage classes, with the default one marked with `(default)` next to its name. + +If you want to change the default, you can unset the existing default manually, like this: + +```bash +SC_NAME=gp2 # Replace with the name of the storage class you want to unset as default +# You only need to run `set-cluster` when you are changing target clusters +set-cluster admin # replace admin with other role name if desired +kubectl patch storageclass $SC_NAME -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}' +``` + +Or you can import the existing default storage class into Terraform and manage or delete it entirely, like this: + +```bash +SC_NAME=gp2 # Replace with the name of the storage class you want to unset as default +atmos terraform import eks/storage-class 'kubernetes_storage_class_v1.ebs["'${SC_NAME}'"]' $SC_NAME -s=core-usw2-dev +``` + +View the parameters of a storage class by running this command: + +```bash +SC_NAME=gp2 # Replace with the name of the storage class you want to view +# You only need to run `set-cluster` when you are changing target clusters +set-cluster admin # replace admin with other role name if desired +kubectl get storageclass $SC_NAME -o yaml +``` + +You can then match that configuration, except that you cannot omit `allow_volume_exansion`. + +```yaml +ebs_storage_classes: + gp2: + make_default_storage_class: true + include_tags: false + # Preserve values originally set by eks/cluster. + # Set to "" to omit. + provisioner: kubernetes.io/aws-ebs + parameters: + type: gp2 + encrypted: "" +``` + +Here's an example snippet for how to use this component. + +```yaml +eks/storage-class: + vars: + ebs_storage_classes: + gp2: + make_default_storage_class: false + include_tags: false + # Preserve values originally set by eks/cluster. + # Set to "" to omit. 
+ provisioner: kubernetes.io/aws-ebs + parameters: + type: gp2 + encrypted: "" + gp3: + make_default_storage_class: true + parameters: + type: gp3 + efs_storage_classes: + efs-sc: + make_default_storage_class: false + efs_component_name: "efs" # Replace with the name of the EFS component, previously "eks/efs" +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | +| [helm](#requirement\_helm) | >= 2.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.22.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.9.0 | +| [kubernetes](#provider\_kubernetes) | >= 2.22.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [efs](#module\_efs) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [kubernetes_storage_class_v1.ebs](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/storage_class_v1) | resource | +| [kubernetes_storage_class_v1.efs](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/storage_class_v1) | resource | +| [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [ebs\_storage\_classes](#input\_ebs\_storage\_classes) | A map of storage class name to EBS parameters to create |
map(object({
make_default_storage_class = optional(bool, false)
include_tags = optional(bool, true) # If true, StorageClass will set our tags on created EBS volumes
labels = optional(map(string), null)
reclaim_policy = optional(string, "Delete")
volume_binding_mode = optional(string, "WaitForFirstConsumer")
mount_options = optional(list(string), null)
# Allowed topologies are poorly documented, and poorly implemented.
# According to the API spec https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#storageclass-v1-storage-k8s-io
# it should be a list of objects with a `matchLabelExpressions` key, which is a list of objects with `key` and `values` keys.
# However, the Terraform resource only allows a single object in a matchLabelExpressions block, not a list,
# the EBS driver appears to only allow a single matchLabelExpressions block, and it is entirely unclear
# what should happen if either of the lists has more than one element.
# So we simplify it here to be singletons, not lists, and allow for a future change to the resource to support lists,
# and a future replacement for this flattened object which can maintain backward compatibility.
allowed_topologies_match_label_expressions = optional(object({
key = optional(string, "topology.ebs.csi.aws.com/zone")
values = list(string)
}), null)
allow_volume_expansion = optional(bool, true)
# parameters, see https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/parameters.md
parameters = object({
fstype = optional(string, "ext4") # "csi.storage.k8s.io/fstype"
type = optional(string, "gp3")
iopsPerGB = optional(string, null)
allowAutoIOPSPerGBIncrease = optional(string, null) # "true" or "false"
iops = optional(string, null)
throughput = optional(string, null)

encrypted = optional(string, "true")
kmsKeyId = optional(string, null) # ARN of the KMS key to use for encryption. If not specified, the default key is used.
blockExpress = optional(string, null) # "true" or "false"
blockSize = optional(string, null)
})
provisioner = optional(string, "ebs.csi.aws.com")

# TODO: support tags
# https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/tagging.md
}))
| `{}` | no | +| [efs\_storage\_classes](#input\_efs\_storage\_classes) | A map of storage class name to EFS parameters to create |
map(object({
make_default_storage_class = optional(bool, false)
labels = optional(map(string), null)
efs_component_name = optional(string, "eks/efs")
reclaim_policy = optional(string, "Delete")
volume_binding_mode = optional(string, "Immediate")
# Mount options are poorly documented.
# TLS is now the default and need not be specified. https://github.com/kubernetes-sigs/aws-efs-csi-driver/tree/master/docs#encryption-in-transit
# Other options include `lookupcache` and `iam`.
mount_options = optional(list(string), null)
parameters = optional(object({
basePath = optional(string, "/efs_controller")
directoryPerms = optional(string, "700")
provisioningMode = optional(string, "efs-ap")
gidRangeStart = optional(string, null)
gidRangeEnd = optional(string, null)
# Support for cross-account EFS mounts
# See https://github.com/kubernetes-sigs/aws-efs-csi-driver/tree/master/examples/kubernetes/cross_account_mount
# and for gritty details on secrets: https://kubernetes-csi.github.io/docs/secrets-and-credentials-storage-class.html
az = optional(string, null)
provisioner-secret-name = optional(string, null) # "csi.storage.k8s.io/provisioner-secret-name"
provisioner-secret-namespace = optional(string, null) # "csi.storage.k8s.io/provisioner-secret-namespace"
}), {})
provisioner = optional(string, "efs.csi.aws.com")
}))
| `{}` | no | +| [eks\_component\_name](#input\_eks\_component\_name) | The name of the EKS component for the cluster in which to create the storage classes | `string` | `"eks/cluster"` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `false` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | +| [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | +| [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | +| [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | +| [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | +| [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | +| [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region. | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [storage\_classes](#output\_storage\_classes) | Storage classes created by this module | + + + +## Related How-to Guides + +- [EBS CSI Migration FAQ](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi-migration-faq.html) +- [Migrating Clusters From gp2 to gp3 EBS Volumes](https://aws.amazon.com/blogs/containers/migrating-amazon-eks-clusters-from-gp2-to-gp3-ebs-volumes/) +- [Kubernetes: Change the Default StorageClass](https://kubernetes.io/docs/tasks/administer-cluster/change-default-storage-class/) + +## References + +- [Kubernetes Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes) +- +- [EBS CSI driver (Amazon)](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html) +- [EBS CSI driver (GitHub)](https://github.com/kubernetes-sigs/aws-ebs-csi-driver#documentation) +- [EBS CSI StorageClass Parameters](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/parameters.md) +- [EFS CSI driver (Amazon)](https://docs.aws.amazon.com/eks/latest/userguide/efs-csi.html) +- [EFS CSI driver (GitHub)](https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/docs/README.md#examples) +- [EFS CSI StorageClass Parameters](https://github.com/kubernetes-sigs/aws-efs-csi-driver/tree/master/docs#storage-class-parameters-for-dynamic-provisioning) +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/eks/cluster) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/eks/storage-class/context.tf b/modules/eks/storage-class/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/eks/storage-class/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/eks/storage-class/main.tf b/modules/eks/storage-class/main.tf new file mode 100644 index 000000000..e4abdd8fb --- /dev/null +++ b/modules/eks/storage-class/main.tf @@ -0,0 +1,88 @@ +locals { + enabled = module.this.enabled + + efs_components = local.enabled ? toset([for k, v in var.efs_storage_classes : v.efs_component_name]) : [] + + # In order to use `optional()`, the variable must be an object, but + # object keys must be valid identifiers and cannot be like "csi.storage.k8s.io/fstype" + # See https://github.com/hashicorp/terraform/issues/22681 + # So we have to convert the object to a map with the keys the StorageClass expects + ebs_key_map = { + fstype = "csi.storage.k8s.io/fstype" + } + old_ebs_key_map = { + fstype = "fsType" + } + + efs_key_map = { + provisioner-secret-name = "csi.storage.k8s.io/provisioner-secret-name" + provisioner-secret-namespace = "csi.storage.k8s.io/provisioner-secret-namespace" + } + + # Tag with cluster name rather than just stage ID. + tags = merge(module.this.tags, { Name = module.eks.outputs.eks_cluster_id }) +} + +resource "kubernetes_storage_class_v1" "ebs" { + for_each = local.enabled ? var.ebs_storage_classes : {} + + metadata { + name = each.key + annotations = { + "storageclass.kubernetes.io/is-default-class" = each.value.make_default_storage_class ? "true" : "false" + } + labels = each.value.labels + } + + # Tags are implemented via parameters. We use "tagSpecification_n" as the key, starting at 1. + # See https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/tagging.md#storageclass-tagging + parameters = merge({ for k, v in each.value.parameters : ( + # provisioner kubernetes.io/aws-ebs uses the key "fsType" instead of "csi.storage.k8s.io/fstype" + lookup((each.value.provisioner == "kubernetes.io/aws-ebs" ? local.old_ebs_key_map : local.ebs_key_map), k, k)) => v if v != null && v != "" }, + each.value.include_tags ? 
{ for i, k in keys(local.tags) : "tagSpecification_${i + 1}" => "${k}=${local.tags[k]}" } : {}, + ) + + storage_provisioner = each.value.provisioner + reclaim_policy = each.value.reclaim_policy + volume_binding_mode = each.value.volume_binding_mode + mount_options = each.value.mount_options + + # Allowed topologies are poorly documented, and poorly implemented. + # According to the API spec https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#storageclass-v1-storage-k8s-io + # it should be a list of objects with a `matchLabelExpressions` key, which is a list of objects with `key` and `values` keys. + # However, the Terraform resource only allows a single object in a matchLabelExpressions block, not a list,, + # the EBS driver appears to only allow a single matchLabelExpressions block, and it is entirely unclear + # what should happen if either of the lists has more than one element. So we simplify it here to be singletons, not lists. + dynamic "allowed_topologies" { + for_each = each.value.allowed_topologies_match_label_expressions != null ? ["zones"] : [] + content { + match_label_expressions { + key = each.value.allowed_topologies_match_label_expressions.key + values = each.value.allowed_topologies_match_label_expressions.values + } + } + } + + # Unfortunately, the provider always sets allow_volume_expansion to something whether you provide it or not. + # There is no way to omit it. + allow_volume_expansion = each.value.allow_volume_expansion +} + +resource "kubernetes_storage_class_v1" "efs" { + for_each = local.enabled ? var.efs_storage_classes : {} + + metadata { + name = each.key + annotations = { + "storageclass.kubernetes.io/is-default-class" = each.value.make_default_storage_class ? "true" : "false" + } + labels = each.value.labels + } + parameters = merge({ fileSystemId = module.efs[each.value.efs_component_name].outputs.efs_id }, + { for k, v in each.value.parameters : lookup(local.efs_key_map, k, k) => v if v != null && v != "" }) + + storage_provisioner = each.value.provisioner + reclaim_policy = each.value.reclaim_policy + volume_binding_mode = each.value.volume_binding_mode + mount_options = each.value.mount_options +} diff --git a/modules/eks/storage-class/outputs.tf b/modules/eks/storage-class/outputs.tf new file mode 100644 index 000000000..5d7a7e70f --- /dev/null +++ b/modules/eks/storage-class/outputs.tf @@ -0,0 +1,4 @@ +output "storage_classes" { + value = merge(kubernetes_storage_class_v1.ebs, kubernetes_storage_class_v1.efs) + description = "Storage classes created by this module" +} diff --git a/modules/eks/storage-class/provider-helm.tf b/modules/eks/storage-class/provider-helm.tf new file mode 100644 index 000000000..91cc7f6d4 --- /dev/null +++ b/modules/eks/storage-class/provider-helm.tf @@ -0,0 +1,201 @@ +################## +# +# This file is a drop-in to provide a helm provider. +# +# It depends on 2 standard Cloud Posse data source modules to be already +# defined in the same component: +# +# 1. module.iam_roles to provide the AWS profile or Role ARN to use to access the cluster +# 2. module.eks to provide the EKS cluster information +# +# All the following variables are just about configuring the Kubernetes provider +# to be able to modify EKS cluster. The reason there are so many options is +# because at various times, each one of them has had problems, so we give you a choice. 
+# +# The reason there are so many "enabled" inputs rather than automatically +# detecting whether or not they are enabled based on the value of the input +# is that any logic based on input values requires the values to be known during +# the "plan" phase of Terraform, and often they are not, which causes problems. +# +variable "kubeconfig_file_enabled" { + type = bool + default = false + description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false +} + +variable "kubeconfig_file" { + type = string + default = "" + description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false +} + +variable "kubeconfig_context" { + type = string + default = "" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false +} + +variable "kube_data_auth_enabled" { + type = bool + default = false + description = <<-EOT + If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. + EOT + nullable = false +} + +variable "kube_exec_auth_enabled" { + type = bool + default = true + description = <<-EOT + If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. + Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. + EOT + nullable = false +} + +variable "kube_exec_auth_role_arn" { + type = string + default = "" + description = "The role ARN for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_role_arn_enabled" { + type = bool + default = true + description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false +} + +variable "kube_exec_auth_aws_profile" { + type = string + default = "" + description = "The AWS config profile for `aws eks get-token` to use" + nullable = false +} + +variable "kube_exec_auth_aws_profile_enabled" { + type = bool + default = false + description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false +} + +variable "kubeconfig_exec_auth_api_version" { + type = string + default = "client.authentication.k8s.io/v1beta1" + description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false +} + +variable "helm_manifest_experiment_enabled" { + type = bool + default = false + description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false +} + +locals { + kubeconfig_file_enabled = var.kubeconfig_file_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? 
format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + + # Eventually we might try to get this from an environment variable + kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version + + exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? [ + "--profile", var.kube_exec_auth_aws_profile + ] : [] + + kube_exec_auth_role_arn = coalesce(var.kube_exec_auth_role_arn, module.iam_roles.terraform_role_arn) + exec_role = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? [ + "--role-arn", local.kube_exec_auth_role_arn + ] : [] + + # Provide dummy configuration for the case where the EKS cluster is not available. + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) + # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. + eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") +} + +data "aws_eks_cluster_auth" "eks" { + count = local.kube_data_auth_enabled ? 1 : 0 + name = local.eks_cluster_id +} + +provider "helm" { + kubernetes { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } + } + experiments { + manifest = var.helm_manifest_experiment_enabled && module.this.enabled + } +} + +provider "kubernetes" { + host = local.eks_cluster_endpoint + cluster_ca_certificate = local.cluster_ca_certificate + token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context + + dynamic "exec" { + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? 
["exec"] : [] + content { + api_version = local.kubeconfig_exec_auth_api_version + command = "aws" + args = concat(local.exec_profile, [ + "eks", "get-token", "--cluster-name", local.eks_cluster_id + ], local.exec_role) + } + } +} diff --git a/modules/eks/storage-class/providers.tf b/modules/eks/storage-class/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/eks/storage-class/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/eks/storage-class/remote-state.tf b/modules/eks/storage-class/remote-state.tf new file mode 100644 index 000000000..e4db4d0b2 --- /dev/null +++ b/modules/eks/storage-class/remote-state.tf @@ -0,0 +1,19 @@ +module "efs" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + for_each = local.efs_components + + component = each.value + + context = module.this.context +} + +module "eks" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.eks_component_name + + context = module.this.context +} diff --git a/modules/eks/storage-class/variables.tf b/modules/eks/storage-class/variables.tf new file mode 100644 index 000000000..597970e54 --- /dev/null +++ b/modules/eks/storage-class/variables.tf @@ -0,0 +1,87 @@ +variable "region" { + description = "AWS Region." + type = string +} + +variable "eks_component_name" { + type = string + description = "The name of the EKS component for the cluster in which to create the storage classes" + default = "eks/cluster" + nullable = false +} + +variable "ebs_storage_classes" { + type = map(object({ + make_default_storage_class = optional(bool, false) + include_tags = optional(bool, true) # If true, StorageClass will set our tags on created EBS volumes + labels = optional(map(string), null) + reclaim_policy = optional(string, "Delete") + volume_binding_mode = optional(string, "WaitForFirstConsumer") + mount_options = optional(list(string), null) + # Allowed topologies are poorly documented, and poorly implemented. + # According to the API spec https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#storageclass-v1-storage-k8s-io + # it should be a list of objects with a `matchLabelExpressions` key, which is a list of objects with `key` and `values` keys. + # However, the Terraform resource only allows a single object in a matchLabelExpressions block, not a list, + # the EBS driver appears to only allow a single matchLabelExpressions block, and it is entirely unclear + # what should happen if either of the lists has more than one element. + # So we simplify it here to be singletons, not lists, and allow for a future change to the resource to support lists, + # and a future replacement for this flattened object which can maintain backward compatibility. 
+ allowed_topologies_match_label_expressions = optional(object({ + key = optional(string, "topology.ebs.csi.aws.com/zone") + values = list(string) + }), null) + allow_volume_expansion = optional(bool, true) + # parameters, see https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/parameters.md + parameters = object({ + fstype = optional(string, "ext4") # "csi.storage.k8s.io/fstype" + type = optional(string, "gp3") + iopsPerGB = optional(string, null) + allowAutoIOPSPerGBIncrease = optional(string, null) # "true" or "false" + iops = optional(string, null) + throughput = optional(string, null) + + encrypted = optional(string, "true") + kmsKeyId = optional(string, null) # ARN of the KMS key to use for encryption. If not specified, the default key is used. + blockExpress = optional(string, null) # "true" or "false" + blockSize = optional(string, null) + }) + provisioner = optional(string, "ebs.csi.aws.com") + + # TODO: support tags + # https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/tagging.md + })) + description = "A map of storage class name to EBS parameters to create" + default = {} + nullable = false +} + +variable "efs_storage_classes" { + type = map(object({ + make_default_storage_class = optional(bool, false) + labels = optional(map(string), null) + efs_component_name = optional(string, "eks/efs") + reclaim_policy = optional(string, "Delete") + volume_binding_mode = optional(string, "Immediate") + # Mount options are poorly documented. + # TLS is now the default and need not be specified. https://github.com/kubernetes-sigs/aws-efs-csi-driver/tree/master/docs#encryption-in-transit + # Other options include `lookupcache` and `iam`. + mount_options = optional(list(string), null) + parameters = optional(object({ + basePath = optional(string, "/efs_controller") + directoryPerms = optional(string, "700") + provisioningMode = optional(string, "efs-ap") + gidRangeStart = optional(string, null) + gidRangeEnd = optional(string, null) + # Support for cross-account EFS mounts + # See https://github.com/kubernetes-sigs/aws-efs-csi-driver/tree/master/examples/kubernetes/cross_account_mount + # and for gritty details on secrets: https://kubernetes-csi.github.io/docs/secrets-and-credentials-storage-class.html + az = optional(string, null) + provisioner-secret-name = optional(string, null) # "csi.storage.k8s.io/provisioner-secret-name" + provisioner-secret-namespace = optional(string, null) # "csi.storage.k8s.io/provisioner-secret-namespace" + }), {}) + provisioner = optional(string, "efs.csi.aws.com") + })) + description = "A map of storage class name to EFS parameters to create" + default = {} + nullable = false +} diff --git a/modules/eks/karpenter-provisioner/versions.tf b/modules/eks/storage-class/versions.tf similarity index 91% rename from modules/eks/karpenter-provisioner/versions.tf rename to modules/eks/storage-class/versions.tf index 57cc9f927..fba2b45f9 100644 --- a/modules/eks/karpenter-provisioner/versions.tf +++ b/modules/eks/storage-class/versions.tf @@ -6,13 +6,13 @@ terraform { source = "hashicorp/aws" version = ">= 4.9.0" } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.22.0" + } helm = { source = "hashicorp/helm" version = ">= 2.0" } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.14.0" - } } } diff --git a/modules/eks/tailscale/README.md b/modules/eks/tailscale/README.md new file mode 100644 index 000000000..74866021b --- /dev/null +++ b/modules/eks/tailscale/README.md @@ -0,0 +1,124 @@ +# Component: 
eks/tailscale + +## Usage + +**Stack Level**: Regional + +Use this in the catalog or use these variables to overwrite the catalog values. + +```yaml +components: + terraform: + eks/tailscale: + vars: + enabled: true + name: tailscale + create_namespace: true + kubernetes_namespace: "tailscale" + image_repo: tailscale/k8s-operator + image_tag: unstable +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.7.1 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | +| [kubernetes](#provider\_kubernetes) | >= 2.7.1 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [store\_read](#module\_store\_read) | cloudposse/ssm-parameter-store/aws | 0.10.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [kubernetes_cluster_role.tailscale_operator](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/cluster_role) | resource | +| [kubernetes_cluster_role_binding.tailscale_operator](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/cluster_role_binding) | resource | +| [kubernetes_deployment.operator](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource | +| [kubernetes_namespace.default](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| [kubernetes_role.operator](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/role) | resource | +| [kubernetes_role.proxies](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/role) | resource | +| [kubernetes_role_binding.operator](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/role_binding) | resource | +| [kubernetes_role_binding.proxies](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/role_binding) | resource | +| [kubernetes_secret.operator_oauth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/secret) | resource | +| [kubernetes_service_account.operator](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service_account) | resource | +| [kubernetes_service_account.proxies](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service_account) | resource | +| [aws_eks_cluster.kubernetes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | +| [aws_eks_cluster_auth.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [aws_subnet.vpc_subnets](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnet) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [chart\_values](#input\_chart\_values) | Additional map values to yamlencode as `helm_release` values. | `any` | `{}` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [create\_namespace](#input\_create\_namespace) | Create the namespace if it does not yet exist. Defaults to `false`. | `bool` | `false` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [deployment\_name](#input\_deployment\_name) | Name of the tailscale deployment, defaults to `tailscale` if this is null | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to the `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [eks\_component\_name](#input\_eks\_component\_name) | The name of the eks component | `string` | `"eks/cluster"` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [env](#input\_env) | Map of ENV vars in the format `key=value`. These ENV vars will be set in the `utils` provider before executing the data source | `map(string)` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [helm\_manifest\_experiment\_enabled](#input\_helm\_manifest\_experiment\_enabled) | Enable storing of the rendered manifest for helm\_release so the full diff of what is changing can been seen in the plan | `bool` | `true` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [image\_repo](#input\_image\_repo) | Image repository for the deployment | `string` | `"ghcr.io/tailscale/tailscale"` | no | +| [image\_tag](#input\_image\_tag) | Image Tag for the deployment. | `string` | `"latest"` | no | +| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | +| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | +| [kube\_data\_auth\_enabled](#input\_kube\_data\_auth\_enabled) | If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. | `bool` | `false` | no | +| [kube\_exec\_auth\_aws\_profile](#input\_kube\_exec\_auth\_aws\_profile) | The AWS config profile for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_aws\_profile\_enabled](#input\_kube\_exec\_auth\_aws\_profile\_enabled) | If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token` | `bool` | `false` | no | +| [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | +| [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | +| [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | +| [kube\_secret](#input\_kube\_secret) | Kube Secret Name for tailscale | `string` | `"tailscale"` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | +| [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | +| [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | +| [kubernetes\_namespace](#input\_kubernetes\_namespace) | The namespace to install the release into. | `string` | n/a | yes | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [routes](#input\_routes) | List of CIDR Ranges or IPs to allow Tailscale to connect to | `list(string)` | `[]` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [deployment](#output\_deployment) | Tail scale operator deployment K8S resource | + + + +## References + +- https://github.com/Ealenn/tailscale diff --git a/modules/eks/tailscale/context.tf b/modules/eks/tailscale/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/eks/tailscale/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. 
+ EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. 
+ EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/eks/tailscale/main.tf b/modules/eks/tailscale/main.tf new file mode 100644 index 000000000..f14108d47 --- /dev/null +++ b/modules/eks/tailscale/main.tf @@ -0,0 +1,265 @@ +locals { + enabled = module.this.enabled + create_namespace = local.enabled + + routes = join(",", concat(var.routes, [for k, v in data.aws_subnet.vpc_subnets : v.cidr_block])) +} + +module "store_read" { + source = "cloudposse/ssm-parameter-store/aws" + version = "0.10.0" + + parameter_read = [ + "/tailscale/client_id", + "/tailscale/client_secret", + ] +} + +resource "kubernetes_secret" "operator_oauth" { + metadata { + name = "operator-oauth" + namespace = var.kubernetes_namespace + } + data = { + client_id = module.store_read.map["/tailscale/client_id"] + client_secret = module.store_read.map["/tailscale/client_secret"] + } +} + +resource "kubernetes_namespace" "default" { + count = local.create_namespace ? 1 : 0 + + metadata { + name = var.kubernetes_namespace + + labels = module.this.tags + } +} + + +resource "kubernetes_service_account" "proxies" { + metadata { + name = "proxies" + namespace = var.kubernetes_namespace + } +} + +resource "kubernetes_role" "proxies" { + metadata { + name = "proxies" + namespace = var.kubernetes_namespace + } + + rule { + verbs = ["*"] + api_groups = [""] + resources = ["secrets"] + } +} + +resource "kubernetes_role_binding" "proxies" { + metadata { + name = "proxies" + namespace = var.kubernetes_namespace + } + + subject { + kind = "ServiceAccount" + name = "proxies" + namespace = var.kubernetes_namespace + } + + role_ref { + api_group = "rbac.authorization.k8s.io" + kind = "Role" + name = "proxies" + } +} + +resource "kubernetes_service_account" "operator" { + metadata { + name = "operator" + namespace = var.kubernetes_namespace + } +} + +resource "kubernetes_cluster_role" "tailscale_operator" { + metadata { + name = "tailscale-operator" + } + + rule { + verbs = ["*"] + api_groups = [""] + resources = ["services", "services/status"] + } +} + +resource "kubernetes_cluster_role_binding" "tailscale_operator" { + metadata { + name = "tailscale-operator" + } + + subject { + kind = "ServiceAccount" + name = "operator" + namespace = var.kubernetes_namespace + } + + role_ref { + api_group = "rbac.authorization.k8s.io" + kind = "ClusterRole" + name = "tailscale-operator" + } +} + +resource "kubernetes_role" "operator" { + metadata { + name = "operator" + namespace = var.kubernetes_namespace + } + + rule { + verbs = ["*"] + api_groups = [""] + resources = ["secrets"] + } + + rule { + verbs = ["*"] + api_groups = ["apps"] + resources = ["statefulsets"] + } +} + +resource "kubernetes_role_binding" "operator" { + metadata { + name = "operator" + namespace = var.kubernetes_namespace + } + + subject { + kind = "ServiceAccount" + name = "operator" + namespace = var.kubernetes_namespace + } + + role_ref { + api_group = "rbac.authorization.k8s.io" + kind = "Role" + name = "operator" + } +} + +resource "kubernetes_deployment" "operator" { + metadata { + name = coalesce(var.deployment_name, "tailscale-operator") + namespace = var.kubernetes_namespace + labels = { + app = "tailscale" + } + } + + spec { + replicas = 1 + + selector { + match_labels = { + app = "operator" + } + } + + template { + metadata { + labels = { + app = "operator" + } + } + + spec { + volume { + name = "oauth" + + secret { + secret_name = "operator-oauth" + } + } + + container { + image = format("%s:%s", var.image_repo, var.image_tag) 
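+          # Note: with the variable defaults this renders "ghcr.io/tailscale/tailscale:latest";
+          # the usage example in the README ("tailscale/k8s-operator" + "unstable") would
+          # render "tailscale/k8s-operator:unstable" instead.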
+ name = "tailscale" + + env { + name = "OPERATOR_HOSTNAME" + value = format("%s-%s-%s-%s", "tailscale-operator", var.tenant, var.environment, var.stage) + } + + env { + name = "OPERATOR_SECRET" + value = "operator" + } + + env { + name = "OPERATOR_LOGGING" + value = "info" + } + + env { + name = "OPERATOR_NAMESPACE" + + value_from { + field_ref { + field_path = "metadata.namespace" + } + } + } + + env { + name = "CLIENT_ID_FILE" + value = "/oauth/client_id" + } + + env { + name = "CLIENT_SECRET_FILE" + value = "/oauth/client_secret" + } + + env { + name = "PROXY_IMAGE" + value = "tailscale/tailscale:unstable" + } + + env { + name = "PROXY_TAGS" + value = "tag:k8s" + } + + env { + name = "AUTH_PROXY" + value = "false" + } + + resources { + requests = { + cpu = "500m" + + memory = "100Mi" + } + } + + volume_mount { + name = "oauth" + read_only = true + mount_path = "/oauth" + } + } + + service_account_name = "operator" + } + } + + strategy { + type = "Recreate" + } + } +} diff --git a/modules/eks/tailscale/outputs.tf b/modules/eks/tailscale/outputs.tf new file mode 100644 index 000000000..811fe1ff4 --- /dev/null +++ b/modules/eks/tailscale/outputs.tf @@ -0,0 +1,4 @@ +output "deployment" { + value = kubernetes_deployment.operator + description = "Tail scale operator deployment K8S resource" +} diff --git a/modules/datadog-agent/provider-helm.tf b/modules/eks/tailscale/provider-kubernetes.tf similarity index 83% rename from modules/datadog-agent/provider-helm.tf rename to modules/eks/tailscale/provider-kubernetes.tf index 20e4d3837..00cfd1542 100644 --- a/modules/datadog-agent/provider-helm.tf +++ b/modules/eks/tailscale/provider-kubernetes.tf @@ -110,32 +110,6 @@ data "aws_eks_cluster_auth" "eks" { name = local.eks_cluster_id } -provider "helm" { - kubernetes { - host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) - token = local.kube_data_auth_enabled ? data.aws_eks_cluster_auth.eks[0].token : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context - - dynamic "exec" { - for_each = local.kube_exec_auth_enabled ? ["exec"] : [] - content { - api_version = local.kubeconfig_exec_auth_api_version - command = "aws" - args = concat(local.exec_profile, [ - "eks", "get-token", "--cluster-name", local.eks_cluster_id - ], local.exec_role) - } - } - } - experiments { - manifest = var.helm_manifest_experiment_enabled - } -} - provider "kubernetes" { host = local.eks_cluster_endpoint cluster_ca_certificate = base64decode(local.certificate_authority_data) diff --git a/modules/eks/karpenter-provisioner/providers.tf b/modules/eks/tailscale/providers.tf similarity index 100% rename from modules/eks/karpenter-provisioner/providers.tf rename to modules/eks/tailscale/providers.tf diff --git a/modules/eks/tailscale/remote-state.tf b/modules/eks/tailscale/remote-state.tf new file mode 100644 index 000000000..42155e076 --- /dev/null +++ b/modules/eks/tailscale/remote-state.tf @@ -0,0 +1,20 @@ +module "eks" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.4.1" + + component = var.eks_component_name + + context = module.this.context +} + +data "aws_eks_cluster" "kubernetes" { + count = local.enabled ? 
1 : 0 + + name = module.eks.outputs.eks_cluster_id +} + +data "aws_subnet" "vpc_subnets" { + for_each = local.enabled ? data.aws_eks_cluster.kubernetes[0].vpc_config[0].subnet_ids : [] + + id = each.value +} diff --git a/modules/eks/tailscale/variables.tf b/modules/eks/tailscale/variables.tf new file mode 100644 index 000000000..90be9cc2a --- /dev/null +++ b/modules/eks/tailscale/variables.tf @@ -0,0 +1,63 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "eks_component_name" { + type = string + description = "The name of the eks component" + default = "eks/cluster" +} + +variable "chart_values" { + type = any + description = "Addition map values to yamlencode as `helm_release` values." + default = {} +} + +variable "deployment_name" { + type = string + description = "Name of the tailscale deployment, defaults to `tailscale` if this is null" + default = null +} + +variable "image_repo" { + type = string + description = "Image repository for the deployment" + default = "ghcr.io/tailscale/tailscale" +} + +variable "image_tag" { + type = string + description = "Image Tag for the deployment." + default = "latest" +} + +variable "create_namespace" { + type = bool + description = "Create the namespace if it does not yet exist. Defaults to `false`." + default = false +} + +variable "kubernetes_namespace" { + type = string + description = "The namespace to install the release into." +} + +variable "kube_secret" { + type = string + description = "Kube Secret Name for tailscale" + default = "tailscale" +} + +variable "routes" { + type = list(string) + description = "List of CIDR Ranges or IPs to allow Tailscale to connect to" + default = [] +} + +variable "env" { + type = map(string) + description = "Map of ENV vars in the format `key=value`. These ENV vars will be set in the `utils` provider before executing the data source" + default = null +} diff --git a/modules/eks/tailscale/versions.tf b/modules/eks/tailscale/versions.tf new file mode 100644 index 000000000..8b70f9f52 --- /dev/null +++ b/modules/eks/tailscale/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.7.1" + } + } +} diff --git a/modules/elasticache-redis/README.md b/modules/elasticache-redis/README.md index d5578e4c8..eaec1c2ae 100644 --- a/modules/elasticache-redis/README.md +++ b/modules/elasticache-redis/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/elasticache-redis + - layer/data + - provider/aws +--- + # Component: `elasticache-redis` This component is responsible for provisioning [ElastiCache Redis](https://aws.amazon.com/elasticache/redis/) clusters. @@ -18,8 +25,8 @@ components: enabled: true name: "elasticache-redis" family: redis6.x - ingress_cidr_blocks: [ ] - egress_cidr_blocks: [ "0.0.0.0/0" ] + ingress_cidr_blocks: [] + egress_cidr_blocks: ["0.0.0.0/0"] port: 6379 at_rest_encryption_enabled: true transit_encryption_enabled: false @@ -61,13 +68,14 @@ components: value: lK ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | ## Providers @@ -77,13 +85,13 @@ No providers. 
| Name | Source | Version | |------|--------|---------| -| [dns\_delegated](#module\_dns\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.3 | -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.3 | +| [dns\_delegated](#module\_dns\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [redis\_clusters](#module\_redis\_clusters) | ./modules/redis_cluster | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.3 | -| [vpc\_ingress](#module\_vpc\_ingress) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.3 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [vpc\_ingress](#module\_vpc\_ingress) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -95,6 +103,7 @@ No resources. |------|-------------|------|---------|:--------:| | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | | [allow\_all\_egress](#input\_allow\_all\_egress) | If `true`, the created security group will allow egress on all ports and protocols to all IP address.
If this is false and no egress rules are otherwise specified, then no egress will be allowed. | `bool` | `true` | no | +| [allow\_ingress\_from\_this\_vpc](#input\_allow\_ingress\_from\_this\_vpc) | If set to `true`, allow ingress from the VPC CIDR for this account | `bool` | `true` | no | | [allow\_ingress\_from\_vpc\_stages](#input\_allow\_ingress\_from\_vpc\_stages) | List of stages to pull VPC ingress cidr and add to security group | `list(string)` | `[]` | no | | [apply\_immediately](#input\_apply\_immediately) | Apply changes immediately | `bool` | n/a | yes | | [at\_rest\_encryption\_enabled](#input\_at\_rest\_encryption\_enabled) | Enable encryption at rest | `bool` | n/a | yes | @@ -112,13 +121,12 @@ No resources. | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [family](#input\_family) | Redis family | `string` | n/a | yes | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [ingress\_cidr\_blocks](#input\_ingress\_cidr\_blocks) | CIDR blocks for permitted ingress | `list(string)` | n/a | yes | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [multi\_az\_enabled](#input\_multi\_az\_enabled) | Multi AZ (Automatic Failover must also be enabled. If Cluster Mode is enabled, Multi AZ is on by default, and this setting is ignored) | `bool` | `false` | no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | | [port](#input\_port) | Port number | `number` | n/a | yes | @@ -136,9 +144,11 @@ No resources. |------|-------------| | [redis\_clusters](#output\_redis\_clusters) | Redis cluster objects | + ## References -- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/elasticache-redis) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/elasticache-redis) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/elasticache-redis/default.auto.tfvars b/modules/elasticache-redis/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/elasticache-redis/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/elasticache-redis/main.tf b/modules/elasticache-redis/main.tf index 2f4003da5..0f2f91638 100644 --- a/modules/elasticache-redis/main.tf +++ b/modules/elasticache-redis/main.tf @@ -3,10 +3,8 @@ locals { eks_security_group_enabled = local.enabled && var.eks_security_group_enabled - vpc_cidr = module.vpc.outputs.vpc_cidr - allowed_cidr_blocks = concat( - [local.vpc_cidr], + var.allow_ingress_from_this_vpc ? [module.vpc.outputs.vpc_cidr] : [], var.ingress_cidr_blocks, [ for k in keys(module.vpc_ingress) : @@ -38,6 +36,7 @@ locals { vpc_id = module.vpc.outputs.vpc_id subnets = module.vpc.outputs.private_subnet_ids availability_zones = var.availability_zones + multi_az_enabled = var.multi_az_enabled allowed_security_groups = local.allowed_security_groups additional_security_group_rules = local.additional_security_group_rules @@ -50,6 +49,7 @@ locals { transit_encryption_enabled = var.transit_encryption_enabled apply_immediately = var.apply_immediately automatic_failover_enabled = var.automatic_failover_enabled + auto_minor_version_upgrade = var.auto_minor_version_upgrade cloudwatch_metric_alarms_enabled = var.cloudwatch_metric_alarms_enabled auth_token_enabled = var.auth_token_enabled } @@ -65,13 +65,15 @@ module "redis_clusters" { cluster_name = lookup(each.value, "cluster_name", replace(each.key, "_", "-")) dns_subdomain = join(".", [lookup(each.value, "cluster_name", replace(each.key, "_", "-")), module.this.environment]) - instance_type = each.value.instance_type - num_replicas = lookup(each.value, "num_replicas", 1) - num_shards = lookup(each.value, "num_shards", 0) - replicas_per_shard = lookup(each.value, "replicas_per_shard", 0) - engine_version = each.value.engine_version - parameters = each.value.parameters - cluster_attributes = local.cluster_attributes + instance_type = each.value.instance_type + num_replicas = lookup(each.value, "num_replicas", 1) + num_shards = lookup(each.value, "num_shards", 0) + replicas_per_shard = lookup(each.value, "replicas_per_shard", 0) + engine_version = each.value.engine_version + create_parameter_group = lookup(each.value, "create_parameter_group", true) + parameters = lookup(each.value, "parameters", null) + parameter_group_name = lookup(each.value, "parameter_group_name", null) + cluster_attributes = local.cluster_attributes context = 
module.this.context } diff --git a/modules/elasticache-redis/modules/redis_cluster/main.tf b/modules/elasticache-redis/modules/redis_cluster/main.tf index a82d1288b..37a3ee332 100644 --- a/modules/elasticache-redis/modules/redis_cluster/main.tf +++ b/modules/elasticache-redis/modules/redis_cluster/main.tf @@ -10,7 +10,7 @@ locals { module "redis" { source = "cloudposse/elasticache-redis/aws" - version = "0.44.0" + version = "1.4.1" name = var.cluster_name @@ -20,8 +20,10 @@ module "redis" { apply_immediately = var.cluster_attributes.apply_immediately at_rest_encryption_enabled = var.cluster_attributes.at_rest_encryption_enabled auth_token = local.auth_token + auto_minor_version_upgrade = var.cluster_attributes.auto_minor_version_upgrade automatic_failover_enabled = var.cluster_attributes.automatic_failover_enabled availability_zones = var.cluster_attributes.availability_zones + multi_az_enabled = var.cluster_attributes.multi_az_enabled cluster_mode_enabled = var.num_shards > 0 cluster_mode_num_node_groups = var.num_shards cluster_mode_replicas_per_node_group = var.replicas_per_shard @@ -30,7 +32,9 @@ module "redis" { engine_version = var.engine_version family = var.cluster_attributes.family instance_type = var.instance_type + create_parameter_group = var.create_parameter_group parameter = var.parameters + parameter_group_name = var.parameter_group_name port = var.cluster_attributes.port subnets = var.cluster_attributes.subnets transit_encryption_enabled = var.cluster_attributes.transit_encryption_enabled diff --git a/modules/elasticache-redis/modules/redis_cluster/variables.tf b/modules/elasticache-redis/modules/redis_cluster/variables.tf index 3366c1b2b..1c9af10cd 100644 --- a/modules/elasticache-redis/modules/redis_cluster/variables.tf +++ b/modules/elasticache-redis/modules/redis_cluster/variables.tf @@ -5,6 +5,12 @@ variable "cluster_name" { description = "Elasticache Cluster name" } +variable "create_parameter_group" { + type = bool + default = true + description = "Whether new parameter group should be created. 
Set to false if you want to use existing parameter group" +} + variable "engine_version" { type = string description = "Redis Version" @@ -49,10 +55,12 @@ variable "cluster_attributes" { family = string port = number zone_id = string + multi_az_enabled = bool at_rest_encryption_enabled = bool transit_encryption_enabled = bool apply_immediately = bool automatic_failover_enabled = bool + auto_minor_version_upgrade = bool auth_token_enabled = bool }) description = "Cluster attributes" @@ -66,6 +74,12 @@ variable "parameters" { description = "Parameters to configure cluster parameter group" } +variable "parameter_group_name" { + type = string + default = null + description = "Override the default parameter group name" +} + variable "kms_alias_name_ssm" { default = "alias/aws/ssm" description = "KMS alias name for SSM" diff --git a/modules/elasticache-redis/modules/redis_cluster/versions.tf b/modules/elasticache-redis/modules/redis_cluster/versions.tf index b3730a19e..5b9bb0612 100644 --- a/modules/elasticache-redis/modules/redis_cluster/versions.tf +++ b/modules/elasticache-redis/modules/redis_cluster/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } random = { source = "hashicorp/random" diff --git a/modules/elasticache-redis/providers.tf b/modules/elasticache-redis/providers.tf index efa9ede5d..ef923e10a 100644 --- a/modules/elasticache-redis/providers.tf +++ b/modules/elasticache-redis/providers.tf @@ -1,11 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -14,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/elasticache-redis/remote-state.tf b/modules/elasticache-redis/remote-state.tf index 5b320c9a9..fa1eb2ece 100644 --- a/modules/elasticache-redis/remote-state.tf +++ b/modules/elasticache-redis/remote-state.tf @@ -1,6 +1,6 @@ module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.3" + version = "1.5.0" component = "vpc" @@ -9,7 +9,7 @@ module "vpc" { module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.3" + version = "1.5.0" for_each = local.eks_security_group_enabled ? 
var.eks_component_names : toset([]) @@ -20,7 +20,7 @@ module "eks" { module "dns_delegated" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.3" + version = "1.5.0" component = "dns-delegated" environment = "gbl" @@ -30,7 +30,7 @@ module "dns_delegated" { module "vpc_ingress" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.3" + version = "1.5.0" for_each = toset(var.allow_ingress_from_vpc_stages) diff --git a/modules/elasticache-redis/variables.tf b/modules/elasticache-redis/variables.tf index 3b25582c5..b059c6c36 100644 --- a/modules/elasticache-redis/variables.tf +++ b/modules/elasticache-redis/variables.tf @@ -9,6 +9,12 @@ variable "availability_zones" { default = [] } +variable "multi_az_enabled" { + type = bool + default = false + description = "Multi AZ (Automatic Failover must also be enabled. If Cluster Mode is enabled, Multi AZ is on by default, and this setting is ignored)" +} + variable "family" { type = string description = "Redis family" @@ -59,6 +65,12 @@ variable "automatic_failover_enabled" { description = "Enable automatic failover" } +variable "auto_minor_version_upgrade" { + type = bool + description = "Specifies whether minor version engine upgrades will be applied automatically to the underlying Cache Cluster instances during the maintenance window. Only supported if the engine version is 6 or higher." + default = false +} + variable "cloudwatch_metric_alarms_enabled" { type = bool description = "Boolean flag to enable/disable CloudWatch metrics alarms" @@ -69,6 +81,12 @@ variable "redis_clusters" { description = "Redis cluster configuration" } +variable "allow_ingress_from_this_vpc" { + type = bool + default = true + description = "If set to `true`, allow ingress from the VPC CIDR for this account" +} + variable "allow_ingress_from_vpc_stages" { type = list(string) default = [] diff --git a/modules/elasticache-redis/versions.tf b/modules/elasticache-redis/versions.tf index e89eb16ed..f33ede77f 100644 --- a/modules/elasticache-redis/versions.tf +++ b/modules/elasticache-redis/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } } } diff --git a/modules/elasticsearch/README.md b/modules/elasticsearch/README.md index b82625c22..710e244eb 100644 --- a/modules/elasticsearch/README.md +++ b/modules/elasticsearch/README.md @@ -1,6 +1,14 @@ +--- +tags: + - component/elasticsearch + - layer/data + - provider/aws +--- + # Component: `elasticsearch` -This component is responsible for provisioning an Elasticsearch cluster with built-in integrations with Kibana and Logstash. +This component is responsible for provisioning an Elasticsearch cluster with built-in integrations with Kibana and +Logstash. ## Usage @@ -11,12 +19,13 @@ Here's an example snippet for how to use this component. 
```yaml components: terraform: - elasticache-redis: + elasticsearch: vars: enabled: true + name: foobar instance_type: "t3.medium.elasticsearch" elasticsearch_version: "7.9" - encrypt_at_rest_enabled: false + encrypt_at_rest_enabled: true dedicated_master_enabled: false elasticsearch_subdomain_name: "es" kibana_subdomain_name: "kibana" @@ -26,31 +35,33 @@ components: domain_hostname_enabled: true ``` + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.0 | -| [aws](#requirement\_aws) | >= 3.8 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | +| [random](#requirement\_random) | >= 3.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 3.8 | -| [random](#provider\_random) | n/a | +| [aws](#provider\_aws) | >= 4.9.0 | +| [random](#provider\_random) | >= 3.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [dns\_delegated](#module\_dns\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 0.17.0 | -| [elasticsearch](#module\_elasticsearch) | cloudposse/elasticsearch/aws | 0.33.0 | -| [elasticsearch\_log\_cleanup](#module\_elasticsearch\_log\_cleanup) | cloudposse/lambda-elasticsearch-cleanup/aws | 0.12.3 | +| [dns\_delegated](#module\_dns\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [elasticsearch](#module\_elasticsearch) | cloudposse/elasticsearch/aws | 0.42.0 | +| [elasticsearch\_log\_cleanup](#module\_elasticsearch\_log\_cleanup) | cloudposse/lambda-elasticsearch-cleanup/aws | 0.14.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [this](#module\_this) | cloudposse/label/null | 0.24.1 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 0.17.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -65,12 +76,16 @@ components: | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional tags for appending to tags\_as\_list\_of\_maps. Not added to `tags`. | `map(string)` | `{}` | no | -| [attributes](#input\_attributes) | Additional attributes (e.g. `1`) | `list(string)` | `[]` | no | -| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {}
}
| no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [create\_iam\_service\_linked\_role](#input\_create\_iam\_service\_linked\_role) | Whether to create `AWSServiceRoleForAmazonElasticsearchService` service-linked role.
Set this to `false` if you already have an ElasticSearch cluster created in the AWS account and `AWSServiceRoleForAmazonElasticsearchService` already exists.
See https://github.com/terraform-providers/terraform-provider-aws/issues/5218 for more information. | `bool` | n/a | yes | +| [dedicated\_master\_count](#input\_dedicated\_master\_count) | Number of dedicated master nodes in the cluster | `number` | `0` | no | | [dedicated\_master\_enabled](#input\_dedicated\_master\_enabled) | Indicates whether dedicated master nodes are enabled for the cluster | `bool` | n/a | yes | -| [delimiter](#input\_delimiter) | Delimiter to be used between `namespace`, `environment`, `stage`, `name` and `attributes`.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [dedicated\_master\_type](#input\_dedicated\_master\_type) | Instance type of the dedicated master nodes in the cluster | `string` | `"t2.small.elasticsearch"` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [dns\_delegated\_environment\_name](#input\_dns\_delegated\_environment\_name) | The name of the environment where the `dns-delegated` component is deployed | `string` | `"gbl"` | no | | [domain\_hostname\_enabled](#input\_domain\_hostname\_enabled) | Explicit flag to enable creating a DNS hostname for ES. If `true`, then `var.dns_zone_id` is required. | `bool` | n/a | yes | | [ebs\_volume\_size](#input\_ebs\_volume\_size) | EBS volumes for data storage in GB | `number` | n/a | yes | | [elasticsearch\_iam\_actions](#input\_elasticsearch\_iam\_actions) | List of actions to allow for the IAM roles, _e.g._ `es:ESHttpGet`, `es:ESHttpPut`, `es:ESHttpPost` | `list(string)` |
[
"es:ESHttpGet",
"es:ESHttpPut",
"es:ESHttpPost",
"es:ESHttpHead",
"es:Describe*",
"es:List*"
]
| no | @@ -80,21 +95,22 @@ components: | [elasticsearch\_version](#input\_elasticsearch\_version) | Version of Elasticsearch to deploy (\_e.g.\_ `7.1`, `6.8`, `6.7`, `6.5`, `6.4`, `6.3`, `6.2`, `6.0`, `5.6`, `5.5`, `5.3`, `5.1`, `2.3`, `1.5` | `string` | n/a | yes | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [encrypt\_at\_rest\_enabled](#input\_encrypt\_at\_rest\_enabled) | Whether to enable encryption at rest | `bool` | n/a | yes | -| [environment](#input\_environment) | Environment, e.g. 'uw2', 'us-west-2', OR 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for default, which is `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | IAM Profile to use when importing a resource | `string` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | | [instance\_type](#input\_instance\_type) | The type of the instance | `string` | n/a | yes | | [kibana\_hostname\_enabled](#input\_kibana\_hostname\_enabled) | Explicit flag to enable creating a DNS hostname for Kibana. If `true`, then `var.dns_zone_id` is required. | `bool` | n/a | yes | | [kibana\_subdomain\_name](#input\_kibana\_subdomain\_name) | The name of the subdomain for Kibana in the DNS zone (\_e.g.\_ `kibana`, `ui`, `ui-es`, `search-ui`, `kibana.elasticsearch`) | `string` | n/a | yes | -| [label\_key\_case](#input\_label\_key\_case) | The letter case of label keys (`tag` names) (i.e. `name`, `namespace`, `environment`, `stage`, `attributes`) to use in `tags`.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | -| [label\_order](#input\_label\_order) | The naming order of the id output and Name tag.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 5 elements, but at least one must be present. | `list(string)` | `null` | no | -| [label\_value\_case](#input\_label\_value\_case) | The letter case of output label values (also used in `tags` and `id`).
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Default value: `lower`. | `string` | `null` | no | -| [name](#input\_name) | Solution name, e.g. 'app' or 'jenkins' | `string` | `null` | no | -| [namespace](#input\_namespace) | Namespace, which could be your organization name or abbreviation, e.g. 'eg' or 'cp' | `string` | `null` | no | -| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Regex to replace chars with empty string in `namespace`, `environment`, `stage` and `name`.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS region | `string` | n/a | yes | -| [stage](#input\_stage) | Stage, e.g. 'prod', 'staging', 'dev', OR 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | -| [tags](#input\_tags) | Additional tags (e.g. `map('BusinessUnit','XYZ')` | `map(string)` | `{}` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | ## Outputs @@ -111,8 +127,11 @@ components: | [master\_password\_ssm\_key](#output\_master\_password\_ssm\_key) | SSM key of Elasticsearch master password | | [security\_group\_id](#output\_security\_group\_id) | Security Group ID to control access to the Elasticsearch domain | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/elasticsearch) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/elasticsearch) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/elasticsearch/context.tf b/modules/elasticsearch/context.tf index d4bf134dd..5e0ef8856 100644 --- a/modules/elasticsearch/context.tf +++ b/modules/elasticsearch/context.tf @@ -8,6 +8,8 @@ # Cloud Posse's standard configuration inputs suitable for passing # to Cloud Posse modules. # +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# # Modules should access the whole context as `module.this.context` # to get the input variables with nulls for defaults, # for example `context = module.this.context`, @@ -20,10 +22,11 @@ module "this" { source = "cloudposse/label/null" - version = "0.24.1" # requires Terraform >= 0.13.0 + version = "0.25.0" # requires Terraform >= 0.13.0 enabled = var.enabled namespace = var.namespace + tenant = var.tenant environment = var.environment stage = var.stage name = var.name @@ -36,6 +39,8 @@ module "this" { id_length_limit = var.id_length_limit label_key_case = var.label_key_case label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags context = var.context } @@ -47,6 +52,7 @@ variable "context" { default = { enabled = true namespace = null + tenant = null environment = null stage = null name = null @@ -59,6 +65,15 @@ variable "context" { id_length_limit = null label_key_case = null label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] } description = <<-EOT Single object for setting entire context at once. @@ -88,32 +103,42 @@ variable "enabled" { variable "namespace" { type = string default = null - description = "Namespace, which could be your organization name or abbreviation, e.g. 'eg' or 'cp'" + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" } variable "environment" { type = string default = null - description = "Environment, e.g. 
'uw2', 'us-west-2', OR 'prod', 'staging', 'dev', 'UAT'" + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" } variable "stage" { type = string default = null - description = "Stage, e.g. 'prod', 'staging', 'dev', OR 'source', 'build', 'test', 'deploy', 'release'" + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" } variable "name" { type = string default = null - description = "Solution name, e.g. 'app' or 'jenkins'" + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT } variable "delimiter" { type = string default = null description = <<-EOT - Delimiter to be used between `namespace`, `environment`, `stage`, `name` and `attributes`. + Delimiter to be used between ID elements. Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. EOT } @@ -121,36 +146,64 @@ variable "delimiter" { variable "attributes" { type = list(string) default = [] - description = "Additional attributes (e.g. `1`)" + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT } variable "tags" { type = map(string) default = {} - description = "Additional tags (e.g. `map('BusinessUnit','XYZ')`" + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT } variable "additional_tag_map" { type = map(string) default = {} - description = "Additional tags for appending to tags_as_list_of_maps. Not added to `tags`." + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT } variable "label_order" { type = list(string) default = null description = <<-EOT - The naming order of the id output and Name tag. + The order in which the labels (ID elements) appear in the `id`. Defaults to ["namespace", "environment", "stage", "name", "attributes"]. - You can omit any of the 5 elements, but at least one must be present. - EOT + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT } variable "regex_replace_chars" { type = string default = null description = <<-EOT - Regex to replace chars with empty string in `namespace`, `environment`, `stage` and `name`. 
+ Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. EOT } @@ -161,7 +214,7 @@ variable "id_length_limit" { description = <<-EOT Limit `id` to this many characters (minimum 6). Set to `0` for unlimited length. - Set to `null` for default, which is `0`. + Set to `null` for keep the existing setting, which defaults to `0`. Does not affect `id_full`. EOT validation { @@ -174,7 +227,8 @@ variable "label_key_case" { type = string default = null description = <<-EOT - The letter case of label keys (`tag` names) (i.e. `name`, `namespace`, `environment`, `stage`, `attributes`) to use in `tags`. + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. Possible values: `lower`, `title`, `upper`. Default value: `title`. EOT @@ -189,8 +243,11 @@ variable "label_value_case" { type = string default = null description = <<-EOT - The letter case of output label values (also used in `tags` and `id`). + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. Default value: `lower`. EOT @@ -199,4 +256,24 @@ variable "label_value_case" { error_message = "Allowed values: `lower`, `title`, `upper`, `none`." } } -#### End of copy of cloudposse/terraform-null-label/variables.tf \ No newline at end of file + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/elasticsearch/default.auto.tfvars b/modules/elasticsearch/default.auto.tfvars deleted file mode 100644 index 9fd27e55a..000000000 --- a/modules/elasticsearch/default.auto.tfvars +++ /dev/null @@ -1,41 +0,0 @@ -enabled = false - -name = "es" - -instance_type = "t3.medium.elasticsearch" - -elasticsearch_version = "7.9" - -# calculated: length(local.vpc_private_subnet_ids) -# instance_count = 2 - -# calculated: length(local.vpc_private_subnet_ids) > 1 ? 
true : false -# zone_awareness_enabled = true - -encrypt_at_rest_enabled = false - -dedicated_master_enabled = false - -elasticsearch_subdomain_name = "es" - -kibana_subdomain_name = "kibana" - -ebs_volume_size = 40 - -create_iam_service_linked_role = true - -kibana_hostname_enabled = true - -domain_hostname_enabled = true - -# Allow anonymous access without request signing, relying on network access controls -# https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-ac.html#es-ac-types-ip -# https://aws.amazon.com/premiumsupport/knowledge-center/anonymous-not-authorized-elasticsearch/ -elasticsearch_iam_role_arns = [ - "*", -] -elasticsearch_iam_actions = [ - "es:ESHttpGet", "es:ESHttpPut", "es:ESHttpPost", "es:ESHttpHead", "es:Describe*", "es:List*", - // delete and patch are destructive and could be left out - "es:ESHttpDelete", "es:ESHttpPatch" -] diff --git a/modules/elasticsearch/main.tf b/modules/elasticsearch/main.tf index 2e4c38eed..25f8d5756 100644 --- a/modules/elasticsearch/main.tf +++ b/modules/elasticsearch/main.tf @@ -18,7 +18,7 @@ locals { module "elasticsearch" { source = "cloudposse/elasticsearch/aws" - version = "0.33.0" + version = "0.42.0" security_groups = [local.vpc_default_security_group] vpc_id = local.vpc_id @@ -30,6 +30,8 @@ module "elasticsearch" { availability_zone_count = length(local.vpc_private_subnet_ids) encrypt_at_rest_enabled = var.encrypt_at_rest_enabled dedicated_master_enabled = var.dedicated_master_enabled + dedicated_master_count = var.dedicated_master_enabled ? var.dedicated_master_count : null + dedicated_master_type = var.dedicated_master_enabled ? var.dedicated_master_type : null create_iam_service_linked_role = var.create_iam_service_linked_role kibana_subdomain_name = module.this.environment ebs_volume_size = var.ebs_volume_size @@ -99,7 +101,7 @@ resource "aws_ssm_parameter" "elasticsearch_kibana_endpoint" { module "elasticsearch_log_cleanup" { source = "cloudposse/lambda-elasticsearch-cleanup/aws" - version = "0.12.3" + version = "0.14.0" es_endpoint = module.elasticsearch.domain_endpoint es_domain_arn = module.elasticsearch.domain_arn diff --git a/modules/elasticsearch/providers.tf b/modules/elasticsearch/providers.tf index 908fbd595..ef923e10a 100644 --- a/modules/elasticsearch/providers.tf +++ b/modules/elasticsearch/providers.tf @@ -1,17 +1,19 @@ provider "aws" { region = var.region - # `terraform import` will not use data from a data source, so on import we have to explicitly specify the profile - profile = coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } } module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "IAM Profile to use when importing a resource" -} diff --git a/modules/elasticsearch/remote-state.tf b/modules/elasticsearch/remote-state.tf index bd8a75ff2..950d6d996 100644 --- a/modules/elasticsearch/remote-state.tf +++ b/modules/elasticsearch/remote-state.tf @@ -1,21 +1,18 @@ module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.17.0" + version = "1.5.0" - stack_config_local_path = "../../../stacks" - component = "vpc" + component = "vpc" context = module.this.context - enabled = true } module "dns_delegated" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.17.0" + version = "1.5.0" - stack_config_local_path = "../../../stacks" - component = "dns-delegated" + component = "dns-delegated" + environment = var.dns_delegated_environment_name context = module.this.context - enabled = true } diff --git a/modules/elasticsearch/variables.tf b/modules/elasticsearch/variables.tf index a5328ce3b..c47487d09 100644 --- a/modules/elasticsearch/variables.tf +++ b/modules/elasticsearch/variables.tf @@ -23,6 +23,18 @@ variable "dedicated_master_enabled" { description = "Indicates whether dedicated master nodes are enabled for the cluster" } +variable "dedicated_master_count" { + type = number + description = "Number of dedicated master nodes in the cluster" + default = 0 +} + +variable "dedicated_master_type" { + type = string + default = "t2.small.elasticsearch" + description = "Instance type of the dedicated master nodes in the cluster" +} + variable "elasticsearch_subdomain_name" { type = string description = "The name of the subdomain for Elasticsearch in the DNS zone (_e.g._ `elasticsearch`, `ui`, `ui-es`, `search-ui`)" @@ -87,3 +99,9 @@ variable "elasticsearch_password" { error_message = "Password must be between 8 and 128 characters. If null is provided then a random password will be used." } } + +variable "dns_delegated_environment_name" { + type = string + description = "The name of the environment where the `dns-delegated` component is deployed" + default = "gbl" +} diff --git a/modules/elasticsearch/versions.tf b/modules/elasticsearch/versions.tf index 207f9f727..4a6389362 100644 --- a/modules/elasticsearch/versions.tf +++ b/modules/elasticsearch/versions.tf @@ -1,10 +1,14 @@ terraform { - required_version = ">= 0.13.0" + required_version = ">= 1.0.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.8" + version = ">= 4.9.0" + } + random = { + source = "hashicorp/random" + version = ">= 3.0" } } } diff --git a/modules/eventbridge/README.md b/modules/eventbridge/README.md new file mode 100644 index 000000000..a406e4d7e --- /dev/null +++ b/modules/eventbridge/README.md @@ -0,0 +1,119 @@ +--- +tags: + - component/eventbridge + - layer/unassigned + - provider/aws +--- + +# Component: `eventbridge` + +The `eventbridge` component is a Terraform module that defines a CloudWatch EventBridge rule. The rule is pointed at +cloudwatch by default. + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. 
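A minimal configuration can lean on the component's defaults: when `cloudwatch_event_rule_pattern` is not set, the rule falls back to matching `aws.ec2` events, matched events are delivered to a CloudWatch Logs log group, and logs are retained for `event_log_retention_in_days` (default 3). The sketch below illustrates that shape with placeholder names and values; the fuller ECS-alerts example follows.

```yaml
# Minimal illustrative configuration -- the component instance name and values
# are placeholders; variable names come from this component's variables.tf.
components:
  terraform:
    eventbridge/ec2-events:
      metadata:
        component: eventbridge
      vars:
        enabled: true
        name: ec2-events
        cloudwatch_event_rule_description: "All EC2 events"   # defaults to module.this.id when left empty
        event_log_retention_in_days: 14                        # component default is 3
```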
+ +```yaml +components: + terraform: + eventbridge/ecs-alerts: + metadata: + component: eventbridge + vars: + name: ecs-faults + enabled: true + cloudwatch_event_rule_description: "ECS failures and warnings" + cloudwatch_event_rule_pattern: + source: + - aws.ecs + detail: + $or: + - eventType: + - WARN + - ERROR + - agentConnected: + - false + - containers: + exitCode: + - anything-but: + - 0 +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [cloudwatch\_event](#module\_cloudwatch\_event) | cloudposse/cloudwatch-events/aws | 0.7.0 | +| [cloudwatch\_logs](#module\_cloudwatch\_logs) | cloudposse/cloudwatch-logs/aws | 0.6.8 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_cloudwatch_log_resource_policy.eventbridge_cloudwatch_logs_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_resource_policy) | resource | +| [aws_iam_policy_document.eventbridge_cloudwatch_logs_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [cloudwatch\_event\_rule\_description](#input\_cloudwatch\_event\_rule\_description) | Description of the CloudWatch Event Rule. If empty, will default to `module.this.id` | `string` | `""` | no | +| [cloudwatch\_event\_rule\_pattern](#input\_cloudwatch\_event\_rule\_pattern) | Pattern of the CloudWatch Event Rule | `any` |
{
"source": [
"aws.ec2"
]
}
| no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [event\_log\_retention\_in\_days](#input\_event\_log\_retention\_in\_days) | Number of days to retain the event logs | `number` | `3` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [cloudwatch\_event\_rule\_arn](#output\_cloudwatch\_event\_rule\_arn) | The ARN of the CloudWatch Event Rule | +| [cloudwatch\_event\_rule\_name](#output\_cloudwatch\_event\_rule\_name) | The name of the CloudWatch Event Rule | +| [cloudwatch\_logs\_log\_group\_arn](#output\_cloudwatch\_logs\_log\_group\_arn) | The ARN of the CloudWatch Log Group | +| [cloudwatch\_logs\_log\_group\_name](#output\_cloudwatch\_logs\_log\_group\_name) | The name of the CloudWatch Log Group | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/eventbridge) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/eventbridge/context.tf b/modules/eventbridge/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/eventbridge/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/eventbridge/main.tf b/modules/eventbridge/main.tf new file mode 100644 index 000000000..7f34f42d5 --- /dev/null +++ b/modules/eventbridge/main.tf @@ -0,0 +1,26 @@ +locals { + enabled = module.this.enabled + description = var.cloudwatch_event_rule_description != "" ? var.cloudwatch_event_rule_description : module.this.id +} + +module "cloudwatch_logs" { + source = "cloudposse/cloudwatch-logs/aws" + version = "0.6.8" + count = local.enabled ? 1 : 0 + + retention_in_days = var.event_log_retention_in_days + + context = module.this.context +} + +module "cloudwatch_event" { + source = "cloudposse/cloudwatch-events/aws" + version = "0.7.0" + count = local.enabled ? 
1 : 0 + + cloudwatch_event_rule_description = local.description + cloudwatch_event_rule_pattern = var.cloudwatch_event_rule_pattern + cloudwatch_event_target_arn = one(module.cloudwatch_logs[*].log_group_arn) + + context = module.this.context +} diff --git a/modules/eventbridge/outputs.tf b/modules/eventbridge/outputs.tf new file mode 100644 index 000000000..335d63148 --- /dev/null +++ b/modules/eventbridge/outputs.tf @@ -0,0 +1,19 @@ +output "cloudwatch_logs_log_group_arn" { + description = "The ARN of the CloudWatch Log Group" + value = one(module.cloudwatch_logs[*].log_group_arn) +} + +output "cloudwatch_logs_log_group_name" { + description = "The name of the CloudWatch Log Group" + value = one(module.cloudwatch_logs[*].log_group_name) +} + +output "cloudwatch_event_rule_arn" { + description = "The ARN of the CloudWatch Event Rule" + value = one(module.cloudwatch_event[*].cloudwatch_event_rule_arn) +} + +output "cloudwatch_event_rule_name" { + description = "The name of the CloudWatch Event Rule" + value = one(module.cloudwatch_event[*].cloudwatch_event_rule_id) +} diff --git a/modules/eventbridge/policies.tf b/modules/eventbridge/policies.tf new file mode 100644 index 000000000..e43dbd106 --- /dev/null +++ b/modules/eventbridge/policies.tf @@ -0,0 +1,33 @@ + +# Note, we need to allow the eventbridge to write to cloudwatch logs +# we use aws_cloudwatch_log_resource_policy to do this + +locals { + log_group_arn = one(module.cloudwatch_logs[*].log_group_arn) +} +data "aws_iam_policy_document" "eventbridge_cloudwatch_logs_policy" { + statement { + principals { + type = "Service" + identifiers = [ + "events.amazonaws.com", + "delivery.logs.amazonaws.com", + ] + } + + actions = [ + "logs:CreateLogStream", + "logs:PutLogEvents", + ] + + resources = [ + "${local.log_group_arn}:*", + ] + } +} + +resource "aws_cloudwatch_log_resource_policy" "eventbridge_cloudwatch_logs_policy" { + count = local.enabled ? 1 : 0 + policy_document = data.aws_iam_policy_document.eventbridge_cloudwatch_logs_policy.json + policy_name = module.this.id +} diff --git a/modules/eventbridge/providers.tf b/modules/eventbridge/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/eventbridge/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/eventbridge/remote-state.tf b/modules/eventbridge/remote-state.tf new file mode 100644 index 000000000..e69de29bb diff --git a/modules/eventbridge/variables.tf b/modules/eventbridge/variables.tf new file mode 100644 index 000000000..d53dd014b --- /dev/null +++ b/modules/eventbridge/variables.tf @@ -0,0 +1,26 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "cloudwatch_event_rule_description" { + type = string + description = "Description of the CloudWatch Event Rule. 
If empty, will default to `module.this.id`" + default = "" +} + +variable "cloudwatch_event_rule_pattern" { + type = any + description = "Pattern of the CloudWatch Event Rule" + default = { + "source" = [ + "aws.ec2" + ] + } +} + +variable "event_log_retention_in_days" { + type = number + description = "Number of days to retain the event logs" + default = 3 +} diff --git a/modules/eventbridge/versions.tf b/modules/eventbridge/versions.tf new file mode 100644 index 000000000..4c8603db1 --- /dev/null +++ b/modules/eventbridge/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.3.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + } +} diff --git a/modules/github-action-token-rotator/README.md b/modules/github-action-token-rotator/README.md index 8a4091e64..dd566a83c 100644 --- a/modules/github-action-token-rotator/README.md +++ b/modules/github-action-token-rotator/README.md @@ -1,6 +1,14 @@ +--- +tags: + - component/github-action-token-rotator + - layer/github + - provider/aws +--- + # Component: `github-action-token-rotator` -This component is responsible for provisioning [Github Action Token Rotator](https://github.com/cloudposse/terraform-aws-github-action-token-rotator). +This component is responsible for provisioning +[Github Action Token Rotator](https://github.com/cloudposse/terraform-aws-github-action-token-rotator). This component creates a Lambda to rotate Github Action tokens in SSM Parameter Store. @@ -8,7 +16,8 @@ This component creates a Lambda to rotate Github Action tokens in SSM Parameter **Stack Level**: Regional -Here's an example snippet for how to use this component. This is generally deployed once and to the automation account's primary region. +Here's an example snippet for how to use this component. This is generally deployed once and to the automation account's +primary region. `stacks/catalog/github-action-token-rotator.yaml` file: @@ -25,15 +34,18 @@ components: parameter_store_token_path: /github/runners/my-org/registrationToken ``` -Follow the manual steps using the [guide in the upstream module](https://github.com/cloudposse/terraform-aws-github-action-token-rotator#quick-start) and use `chamber` to add the secrets to the appropriate stage. +Follow the manual steps using the +[guide in the upstream module](https://github.com/cloudposse/terraform-aws-github-action-token-rotator#quick-start) and +use `chamber` to add the secrets to the appropriate stage. + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | ## Providers @@ -66,8 +78,6 @@ No resources. | [github\_app\_installation\_id](#input\_github\_app\_installation\_id) | GitHub App Installation ID | `string` | n/a | yes | | [github\_org\_name](#input\_github\_org\_name) | SSM parameter name format | `string` | n/a | yes | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -88,9 +98,11 @@ No resources. |------|-------------| | [github\_action\_token\_rotator](#output\_github\_action\_token\_rotator) | GitHub action token rotator module outputs. | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/github-action-token-rotator) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/github-action-token-rotator) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/github-action-token-rotator/default.auto.tfvars b/modules/github-action-token-rotator/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/github-action-token-rotator/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/github-action-token-rotator/main.tf b/modules/github-action-token-rotator/main.tf index e62cf837d..5e43bd00b 100644 --- a/modules/github-action-token-rotator/main.tf +++ b/modules/github-action-token-rotator/main.tf @@ -19,4 +19,3 @@ module "github_action_token_rotator" { context = module.this.context } - diff --git a/modules/github-action-token-rotator/outputs.tf b/modules/github-action-token-rotator/outputs.tf index 9b6358cc8..d21f0e031 100644 --- a/modules/github-action-token-rotator/outputs.tf +++ b/modules/github-action-token-rotator/outputs.tf @@ -2,4 +2,3 @@ output "github_action_token_rotator" { value = module.github_action_token_rotator description = "GitHub action token rotator module outputs." } - diff --git a/modules/github-action-token-rotator/providers.tf b/modules/github-action-token-rotator/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/github-action-token-rotator/providers.tf +++ b/modules/github-action-token-rotator/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/github-action-token-rotator/versions.tf b/modules/github-action-token-rotator/versions.tf index e89eb16ed..f33ede77f 100644 --- a/modules/github-action-token-rotator/versions.tf +++ b/modules/github-action-token-rotator/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } } } diff --git a/modules/github-oidc-provider/README.md b/modules/github-oidc-provider/README.md index 734fbbcfe..e2d38fa7a 100644 --- a/modules/github-oidc-provider/README.md +++ b/modules/github-oidc-provider/README.md @@ -1,18 +1,25 @@ +--- +tags: + - component/github-oidc-provider + - layer/github + - provider/aws + - privileged +--- + # Component: `github-oidc-provider` -This component is responsible for authorizing the GitHub OIDC provider -as an Identity provider for an AWS account. It is meant to be used -in concert with `aws-teams` and `aws-team-roles` and/or with -`github-actions-iam-role.mixin.tf` +This component is responsible for authorizing the GitHub OIDC provider as an Identity provider for an AWS account. It is +meant to be used in concert with `aws-teams` and `aws-team-roles` and/or with `github-actions-iam-role.mixin.tf` ## Usage **Stack Level**: Global Here's an example snippet for how to use this component. + - This must be installed in the `identity` account in order to use standard SAML roles with role chaining. -- This must be installed in each individual account where you want to provision a service role for a GitHub action - that will be assumed directly by the action. +- This must be installed in each individual account where you want to provision a service role for a GitHub action that + will be assumed directly by the action. For security, since this component adds an identity provider, only SuperAdmin can install it. @@ -26,13 +33,39 @@ components: ## Configuring the Github OIDC Provider -This component was created to add the Github OIDC provider so that Github Actions can safely assume roles -without the need to store static credentials in the environment. -The details of the GitHub OIDC provider are hard coded in the component, however at some point -the provider's thumbprint may change, at which point you can use -[scripts/get_github_oidc_thumbprint.sh](./scripts/get_github_oidc_thumbprint.sh) +This component was created to add the Github OIDC provider so that Github Actions can safely assume roles without the +need to store static credentials in the environment. The details of the GitHub OIDC provider are hard coded in the +component, however at some point the provider's thumbprint may change, at which point you can use +[get_github_oidc_thumbprint.sh](https://github.com/cloudposse/terraform-aws-components/blob/main/modules/github-oidc-provider/scripts/get_github_oidc_thumbprint.sh) to get the new thumbprint and add it to the list in `var.thumbprint_list`. +This script will pull one of two thumbprints. 
There are two possible intermediary certificates for the Actions SSL +certificate and either can be returned by the GitHub servers, requiring customers to trust both. This is a known +behavior when the intermediary certificates are cross-signed by the CA. Therefore, run this script until both values are +retrieved. Add both to `var.thumbprint_list`. + +For more, see https://github.blog/changelog/2023-06-27-github-actions-update-on-oidc-integration-with-aws/ + +## FAQ + +### I cannot assume the role from GitHub Actions after deploying + +The following error is very common if the GitHub workflow is missing proper permission. + +```bash +Error: User: arn:aws:sts::***:assumed-role/acme-core-use1-auto-actions-runner@actions-runner-system/token-file-web-identity is not authorized to perform: sts:TagSession on resource: arn:aws:iam::999999999999:role/acme-plat-use1-dev-gha +``` + +In order to use a web identity, GitHub Action pipelines must have the following permission. See +[GitHub Action documentation for more](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services#adding-permissions-settings). + +```yaml +permissions: + id-token: write # This is required for requesting the JWT + contents: read # This is required for actions/checkout +``` + + ## Requirements @@ -72,7 +105,6 @@ to get the new thumbprint and add it to the list in `var.thumbprint_list`. | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.<br>
Does not affect `id_full`. | `number` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -82,9 +114,10 @@ to get the new thumbprint and add it to the list in `var.thumbprint_list`. | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [superadmin](#input\_superadmin) | Set `true` if running as the SuperAdmin user | `bool` | `false` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | -| [thumbprint\_list](#input\_thumbprint\_list) | List of OIDC provider certificate thumbprints | `list(string)` |
[
"6938fd4d98bab03faadb97b34396831e3780aea1"
]
| no | +| [thumbprint\_list](#input\_thumbprint\_list) | List of OIDC provider certificate thumbprints | `list(string)` |
[
"6938fd4d98bab03faadb97b34396831e3780aea1",
"1c58a3a8518e8759bf075b76b750d4f2df264fcd"
]
| no | ## Outputs @@ -92,10 +125,11 @@ to get the new thumbprint and add it to the list in `var.thumbprint_list`. |------|-------------| | [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | GitHub OIDC provider ARN | - + ## References - * [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/github-oidc-provider) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/github-oidc-provider) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/github-oidc-provider/providers.tf b/modules/github-oidc-provider/providers.tf index f1096ef8d..13a1a5b6a 100644 --- a/modules/github-oidc-provider/providers.tf +++ b/modules/github-oidc-provider/providers.tf @@ -1,26 +1,50 @@ +# This is a special provider configuration that allows us to use many different +# versions of the Cloud Posse reference architecture to deploy this component +# in any account, including the identity and root accounts. + +# If you have dynamic Terraform roles enabled and an `aws-team` (such as `managers`) +# empowered to make changes in the identity and root accounts. Then you can +# use those roles to deploy this component in the identity and root accounts, +# just like almost any other component. +# +# If you are restricted to using the SuperAdmin role to deploy this component +# in the identity and root accounts, then modify the stack configuration for +# this component for the identity and/or root accounts to set `superadmin: true` +# and backend `role_arn` to `null`. +# +# components: +# terraform: +# github-oidc-provider: +# backend: +# s3: +# role_arn: null +# vars: +# superadmin: true + provider "aws" { region = var.region - # github-oidc-provider, since it authorizes SAML IdPs, should be run as SuperAdmin as a security matter, - # and therefore cannot use "profile" instead of "role_arn" even if the components are generally using profiles. - # Note the role_arn is the ARN of the OrganizationAccountAccessRole, not the SAML role. - + profile = !var.superadmin && module.iam_roles.profiles_enabled ? module.iam_roles.terraform_profile_name : null dynamic "assume_role" { - for_each = var.import_role_arn == null ? (module.iam_roles.org_role_arn != null ? [true] : []) : ["import"] + for_each = !var.superadmin && module.iam_roles.profiles_enabled ? [] : ( + var.superadmin ? 
compact([module.iam_roles.org_role_arn]) : compact([module.iam_roles.terraform_role_arn]) + ) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.org_role_arn) + role_arn = assume_role.value } } } + module "iam_roles" { source = "../account-map/modules/iam-roles" - privileged = true - context = module.this.context + privileged = var.superadmin + + context = module.this.context } -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" +variable "superadmin" { + type = bool + default = false + description = "Set `true` if running as the SuperAdmin user" } diff --git a/modules/github-oidc-provider/scripts/get_github_oidc_thumbprint.sh b/modules/github-oidc-provider/scripts/get_github_oidc_thumbprint.sh index 326ebb28c..5667f8f13 100755 --- a/modules/github-oidc-provider/scripts/get_github_oidc_thumbprint.sh +++ b/modules/github-oidc-provider/scripts/get_github_oidc_thumbprint.sh @@ -2,8 +2,15 @@ ######################################################################################################################## # This script downloads the certificate information from $GITHUB_OIDC_HOST, extracts the certificate material, then uses -# the openssl command to calculate the thumbprint. It is meant to be called manually and the output used to populate +# the openssl command to calculate the thumbprint. It is meant to be called manually and the output used to populate # the `thumbprint_list` variable in the terraform configuration for this module. +# +# This script will pull one of two thumbprints. There are two possible intermediary certificates for the Actions SSL +# certificate and either can be returned by the GitHub servers, requiring customers to trust both. This is a known +# behavior when the intermediary certificates are cross-signed by the CA. Therefore, run this script until both values +# are retrieved. +# +# For more, see https://github.blog/changelog/2023-06-27-github-actions-update-on-oidc-integration-with-aws/ ######################################################################################################################## GITHUB_OIDC_HOST="token.actions.githubusercontent.com" THUMBPRINT=$(echo \ diff --git a/modules/github-oidc-provider/variables.tf b/modules/github-oidc-provider/variables.tf index ee40228a2..0eb4879d1 100644 --- a/modules/github-oidc-provider/variables.tf +++ b/modules/github-oidc-provider/variables.tf @@ -6,5 +6,5 @@ variable "region" { variable "thumbprint_list" { type = list(string) description = "List of OIDC provider certificate thumbprints" - default = ["6938fd4d98bab03faadb97b34396831e3780aea1"] + default = ["6938fd4d98bab03faadb97b34396831e3780aea1", "1c58a3a8518e8759bf075b76b750d4f2df264fcd"] } diff --git a/modules/github-oidc-role/README.md b/modules/github-oidc-role/README.md new file mode 100644 index 000000000..e4bce3939 --- /dev/null +++ b/modules/github-oidc-role/README.md @@ -0,0 +1,257 @@ +--- +tags: + - component/github-oidc-role + - layer/github + - provider/aws + - privileged +--- + +# Component: `github-oidc-role` + +This component is responsible for creating IAM roles for GitHub Actions to assume. + +## Usage + +**Stack Level**: Global + +Here's an example snippet for how to use this component. 
+ +```yaml +# stacks/catalog/github-oidc-role/defaults.yaml +components: + terraform: + github-oidc-role/defaults: + metadata: + type: abstract + vars: + enabled: true + name: gha-iam + # Note: inherited lists are not merged, they are replaced + github_actions_allowed_repos: + - MyOrg/* ## allow all repos in MyOrg +``` + +Example using for gitops (predefined policy): + +```yaml +# stacks/catalog/github-oidc-role/gitops.yaml +import: + - catalog/github-oidc-role/defaults + +components: + terraform: + github-oidc-role/gitops: + metadata: + component: github-oidc-role + inherits: + - github-oidc-role/defaults + vars: + enabled: true + # Note: inherited lists are not merged, they are replaced + github_actions_allowed_repos: + - "MyOrg/infrastructure" + attributes: ["gitops"] + iam_policies: + - gitops + gitops_policy_configuration: + s3_bucket_component_name: gitops/s3-bucket + dynamodb_component_name: gitops/dynamodb +``` + +Example using for lambda-cicd (predefined policy): + +```yaml +# stacks/catalog/github-oidc-role/lambda-cicd.yaml +import: + - catalog/github-oidc-role/defaults + +components: + terraform: + github-oidc-role/lambda-cicd: + metadata: + component: github-oidc-role + inherits: + - github-oidc-role/defaults + vars: + enabled: true + github_actions_allowed_repos: + - MyOrg/example-app-on-lambda-with-gha + attributes: ["lambda-cicd"] + iam_policies: + - lambda-cicd + lambda_cicd_policy_configuration: + enable_ssm_access: true + enable_s3_access: true + s3_bucket_component_name: s3-bucket/github-action-artifacts + s3_bucket_environment_name: gbl + s3_bucket_stage_name: artifacts + s3_bucket_tenant_name: core +``` + +Example Using an AWS Managed policy and a custom inline policy: + +```yaml +# stacks/catalog/github-oidc-role/custom.yaml +import: + - catalog/github-oidc-role/defaults + +components: + terraform: + github-oidc-role/custom: + metadata: + component: github-oidc-role + inherits: + - github-oidc-role/defaults + vars: + enabled: true + github_actions_allowed_repos: + - MyOrg/example-app-on-lambda-with-gha + attributes: ["custom"] + iam_policies: + - arn:aws:iam::aws:policy/AdministratorAccess + iam_policy: + - version: "2012-10-17" + statements: + - effect: "Allow" + actions: + - "ec2:*" + resources: + - "*" +``` + +### Adding Custom Policies + +There are two methods for adding custom policies to the IAM role. + +1. Through the `iam_policy` input which you can use to add inline policies to the IAM role. +2. By defining policies in Terraform and then attaching them to roles by name. + +#### Defining Custom Policies in Terraform + +1. Give the policy a unique name, e.g. `docker-publish`. We will use `NAME` as a placeholder for the name in the + instructions below. +2. Create a file in the component directory (i.e. `github-oidc-role`) with the name `policy_NAME.tf`. +3. In that file, conditionally (based on need) create a policy document as follows: + + ```hcl + locals { + NAME_policy_enabled = contains(var.iam_policies, "NAME") + NAME_policy = local.NAME_policy_enabled ? one(data.aws_iam_policy_document.NAME.*.json) : null + } + + data "aws_iam_policy_document" "NAME" { + count = local.NAME_policy_enabled ? 1 : 0 + + # Define the policy here + } + ``` + + Note that you can also add input variables and outputs to this file if desired. Just make sure that all inputs are + optional. + +4. Create a file named `additional-policy-map_override.tf` in the component directory (if it does not already exist). 
+ This is a [terraform override file](https://developer.hashicorp.com/terraform/language/files/override), meaning its + contents will be merged with the main terraform file, and any locals defined in it will override locals defined in + other files. Having your code in this separate override file makes it possible for the component to provide a + placeholder local variable so that it works without customization, while allowing you to customize the component and + still update it without losing your customizations. +5. In that file, redefine the local variable `overridable_additional_custom_policy_map` map as follows: + + ```hcl + locals { + overridable_additional_custom_policy_map = { + "NAME" = local.NAME_policy + } + } + ``` + + If you have multiple custom policies, using just this one file, add each policy document to the map in the form + `NAME = local.NAME_policy`. + +6. With that done, you can now attach that policy by adding the name to the `iam_policies` list. For example: + + ```yaml + iam_policies: + - "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess" + - "NAME" + ``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.9.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [dynamodb](#module\_dynamodb) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [gha\_assume\_role](#module\_gha\_assume\_role) | ../account-map/modules/team-assume-role-policy | n/a | +| [iam\_policy](#module\_iam\_policy) | cloudposse/iam-policy/aws | 2.0.1 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [s3\_artifacts\_bucket](#module\_s3\_artifacts\_bucket) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [s3\_bucket](#module\_s3\_bucket) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_iam_role.github_actions](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_policy_document.gitops_iam_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.lambda_cicd_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [github\_actions\_allowed\_repos](#input\_github\_actions\_allowed\_repos) | A list of the GitHub repositories that are allowed to assume this role from GitHub Actions. For example,
["cloudposse/infra-live"]. Can contain "*" as wildcard.
If org part of repo name is omitted, "cloudposse" will be assumed. | `list(string)` | `[]` | no | +| [gitops\_policy\_configuration](#input\_gitops\_policy\_configuration) | Configuration for the GitOps IAM Policy, valid keys are
- `s3_bucket_component_name` - Component Name of where to store the TF Plans in S3, defaults to `gitops/s3-bucket`
- `dynamodb_component_name` - Component Name of where to store the TF Plans in Dynamodb, defaults to `gitops/dynamodb`
- `s3_bucket_environment_name` - Environment name for the S3 Bucket, defaults to current environment
- `dynamodb_environment_name` - Environment name for the Dynamodb Table, defaults to current environment |
object({
s3_bucket_component_name = optional(string, "gitops/s3-bucket")
dynamodb_component_name = optional(string, "gitops/dynamodb")
s3_bucket_environment_name = optional(string)
dynamodb_environment_name = optional(string)
})
| `{}` | no | +| [iam\_policies](#input\_iam\_policies) | List of policies to attach to the IAM role, should be either an ARN of an AWS Managed Policy or a name of a custom policy e.g. `gitops` | `list(string)` | `[]` | no | +| [iam\_policy](#input\_iam\_policy) | IAM policy as list of Terraform objects, compatible with Terraform `aws_iam_policy_document` data source
except that `source_policy_documents` and `override_policy_documents` are not included.
Use inputs `iam_source_policy_documents` and `iam_override_policy_documents` for that. |
list(object({
policy_id = optional(string, null)
version = optional(string, null)
statements = list(object({
sid = optional(string, null)
effect = optional(string, null)
actions = optional(list(string), null)
not_actions = optional(list(string), null)
resources = optional(list(string), null)
not_resources = optional(list(string), null)
conditions = optional(list(object({
test = string
variable = string
values = list(string)
})), [])
principals = optional(list(object({
type = string
identifiers = list(string)
})), [])
not_principals = optional(list(object({
type = string
identifiers = list(string)
})), [])
}))
}))
| `[]` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.<br>
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [lambda\_cicd\_policy\_configuration](#input\_lambda\_cicd\_policy\_configuration) | Configuration for the lambda-cicd policy. The following keys are supported:
- `enable_kms_access` - (bool) - Whether to allow access to KMS. Defaults to false.
- `enable_ssm_access` - (bool) - Whether to allow access to SSM. Defaults to false.
- `enable_s3_access` - (bool) - Whether to allow access to S3. Defaults to false.
- `s3_bucket_component_name` - (string) - The name of the component to use for the S3 bucket. Defaults to `s3-bucket/github-action-artifacts`.
- `s3_bucket_environment_name` - (string) - The name of the environment to use for the S3 bucket. Defaults to the environment of the current module.
- `s3_bucket_tenant_name` - (string) - The name of the tenant to use for the S3 bucket. Defaults to the tenant of the current module.
- `s3_bucket_stage_name` - (string) - The name of the stage to use for the S3 bucket. Defaults to the stage of the current module.
- `enable_lambda_update` - (bool) - Whether to allow access to update lambda functions. Defaults to false. |
object({
enable_kms_access = optional(bool, false)
enable_ssm_access = optional(bool, false)
enable_s3_access = optional(bool, false)
s3_bucket_component_name = optional(string, "s3-bucket/github-action-artifacts")
s3_bucket_environment_name = optional(string)
s3_bucket_tenant_name = optional(string)
s3_bucket_stage_name = optional(string)
enable_lambda_update = optional(bool, false)
})
| `{}` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [github\_actions\_iam\_role\_arn](#output\_github\_actions\_iam\_role\_arn) | ARN of IAM role for GitHub Actions | +| [github\_actions\_iam\_role\_name](#output\_github\_actions\_iam\_role\_name) | Name of IAM role for GitHub Actions | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/github-oidc-role) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/github-oidc-role/additional-policy-map.tf b/modules/github-oidc-role/additional-policy-map.tf new file mode 100644 index 000000000..2d0aea69b --- /dev/null +++ b/modules/github-oidc-role/additional-policy-map.tf @@ -0,0 +1,11 @@ +locals { + # If you have custom policies, override this declaration by creating + # a file called `additional-policy-map_override.tf`. + # Then add the custom policies to the overridable_additional_custom_policy_map in that file. + # The key should be the policy you want to override, the value is the json policy document. + # See the README in `github-oidc-role` for more details. + overridable_additional_custom_policy_map = { + # Example: + # gitops = aws_iam_policy.my_custom_gitops_policy.policy + } +} diff --git a/modules/github-oidc-role/context.tf b/modules/github-oidc-role/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/github-oidc-role/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/github-oidc-role/main.tf b/modules/github-oidc-role/main.tf new file mode 100644 index 000000000..7ad3e55b1 --- /dev/null +++ b/modules/github-oidc-role/main.tf @@ -0,0 +1,50 @@ +locals { + enabled = module.this.enabled + managed_policies = [for arn in var.iam_policies : arn if can(regex("^arn:aws[^:]*:iam::aws:policy/", arn))] + policies = length(local.managed_policies) > 0 ? local.managed_policies : null + policy_document_map = { + "gitops" = local.gitops_policy + "lambda_cicd" = local.lambda_cicd_policy + "inline_policy" = one(module.iam_policy.*.json) + } + custom_policy_map = merge(local.policy_document_map, local.overridable_additional_custom_policy_map) + + # Ignore empty policies of the form `"{}"` as well as null policies + active_policy_map = { for k, v in local.custom_policy_map : k => v if try(length(v), 0) > 3 } +} + +module "iam_policy" { + enabled = local.enabled && length(var.iam_policy) > 0 + + source = "cloudposse/iam-policy/aws" + version = "2.0.1" + + iam_policy = var.iam_policy + + context = module.this.context +} + +module "gha_assume_role" { + source = "../account-map/modules/team-assume-role-policy" + + trusted_github_repos = var.github_actions_allowed_repos + + context = module.this.context +} + +resource "aws_iam_role" "github_actions" { + count = local.enabled ? 
1 : 0 + + name = module.this.id + assume_role_policy = module.gha_assume_role.github_assume_role_policy + + managed_policy_arns = local.policies + + dynamic "inline_policy" { + for_each = local.active_policy_map + content { + name = inline_policy.key + policy = inline_policy.value + } + } +} diff --git a/modules/github-oidc-role/outputs.tf b/modules/github-oidc-role/outputs.tf new file mode 100644 index 000000000..20d0b1503 --- /dev/null +++ b/modules/github-oidc-role/outputs.tf @@ -0,0 +1,9 @@ +output "github_actions_iam_role_arn" { + value = one(aws_iam_role.github_actions[*].arn) + description = "ARN of IAM role for GitHub Actions" +} + +output "github_actions_iam_role_name" { + value = one(aws_iam_role.github_actions[*].name) + description = "Name of IAM role for GitHub Actions" +} diff --git a/modules/github-oidc-role/policy_gitops.tf b/modules/github-oidc-role/policy_gitops.tf new file mode 100644 index 000000000..5c91edc88 --- /dev/null +++ b/modules/github-oidc-role/policy_gitops.tf @@ -0,0 +1,113 @@ +variable "gitops_policy_configuration" { + type = object({ + s3_bucket_component_name = optional(string, "gitops/s3-bucket") + dynamodb_component_name = optional(string, "gitops/dynamodb") + s3_bucket_environment_name = optional(string) + dynamodb_environment_name = optional(string) + }) + default = {} + nullable = false + description = <<-EOT + Configuration for the GitOps IAM Policy, valid keys are + - `s3_bucket_component_name` - Component Name of where to store the TF Plans in S3, defaults to `gitops/s3-bucket` + - `dynamodb_component_name` - Component Name of where to store the TF Plans in Dynamodb, defaults to `gitops/dynamodb` + - `s3_bucket_environment_name` - Environment name for the S3 Bucket, defaults to current environment + - `dynamodb_environment_name` - Environment name for the Dynamodb Table, defaults to current environment + EOT +} + +locals { + gitops_policy_enabled = contains(var.iam_policies, "gitops") + gitops_policy = local.gitops_policy_enabled ? one(data.aws_iam_policy_document.gitops_iam_policy.*.json) : null + + s3_bucket_arn = one(module.s3_bucket[*].outputs.bucket_arn) + dynamodb_table_arn = one(module.dynamodb[*].outputs.table_arn) +} + +module "s3_bucket" { + count = local.gitops_policy_enabled ? 1 : 0 + + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = lookup(var.gitops_policy_configuration, "s3_bucket_component_name", "gitops/s3-bucket") + environment = lookup(var.gitops_policy_configuration, "s3_bucket_environment_name", module.this.environment) + + context = module.this.context +} + +module "dynamodb" { + count = local.gitops_policy_enabled ? 1 : 0 + + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = lookup(var.gitops_policy_configuration, "dynamodb_component_name", module.this.environment) + environment = lookup(var.gitops_policy_configuration, "dynamodb_environment_name", module.this.environment) + + context = module.this.context +} + +data "aws_iam_policy_document" "gitops_iam_policy" { + count = local.gitops_policy_enabled ? 
1 : 0 + + # Allow access to the Dynamodb table used to store TF Plans + # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_examples_dynamodb_specific-table.html + statement { + sid = "AllowDynamodbAccess" + effect = "Allow" + actions = [ + "dynamodb:List*", + "dynamodb:DescribeReservedCapacity*", + "dynamodb:DescribeLimits", + "dynamodb:DescribeTimeToLive" + ] + resources = [ + "*" + ] + } + statement { + sid = "AllowDynamodbTableAccess" + effect = "Allow" + actions = [ + "dynamodb:BatchGet*", + "dynamodb:DescribeStream", + "dynamodb:DescribeTable", + "dynamodb:Get*", + "dynamodb:Query", + "dynamodb:Scan", + "dynamodb:BatchWrite*", + "dynamodb:CreateTable", + "dynamodb:Delete*", + "dynamodb:Update*", + "dynamodb:PutItem" + ] + resources = [ + local.dynamodb_table_arn, + "${local.dynamodb_table_arn}/*" + ] + } + + # Allow access to the S3 Bucket used to store TF Plans + # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_examples_s3_rw-bucket.html + statement { + sid = "AllowS3Actions" + effect = "Allow" + actions = [ + "s3:ListBucket" + ] + resources = [ + local.s3_bucket_arn + ] + } + statement { + sid = "AllowS3ObjectActions" + effect = "Allow" + actions = [ + "s3:*Object" + ] + resources = [ + "${local.s3_bucket_arn}/*" + ] + } +} diff --git a/modules/github-oidc-role/policy_lambda-cicd.tf b/modules/github-oidc-role/policy_lambda-cicd.tf new file mode 100644 index 000000000..b11efdc91 --- /dev/null +++ b/modules/github-oidc-role/policy_lambda-cicd.tf @@ -0,0 +1,113 @@ +variable "lambda_cicd_policy_configuration" { + type = object({ + enable_kms_access = optional(bool, false) + enable_ssm_access = optional(bool, false) + enable_s3_access = optional(bool, false) + s3_bucket_component_name = optional(string, "s3-bucket/github-action-artifacts") + s3_bucket_environment_name = optional(string) + s3_bucket_tenant_name = optional(string) + s3_bucket_stage_name = optional(string) + enable_lambda_update = optional(bool, false) + }) + default = {} + nullable = false + description = <<-EOT + Configuration for the lambda-cicd policy. The following keys are supported: + - `enable_kms_access` - (bool) - Whether to allow access to KMS. Defaults to false. + - `enable_ssm_access` - (bool) - Whether to allow access to SSM. Defaults to false. + - `enable_s3_access` - (bool) - Whether to allow access to S3. Defaults to false. + - `s3_bucket_component_name` - (string) - The name of the component to use for the S3 bucket. Defaults to `s3-bucket/github-action-artifacts`. + - `s3_bucket_environment_name` - (string) - The name of the environment to use for the S3 bucket. Defaults to the environment of the current module. + - `s3_bucket_tenant_name` - (string) - The name of the tenant to use for the S3 bucket. Defaults to the tenant of the current module. + - `s3_bucket_stage_name` - (string) - The name of the stage to use for the S3 bucket. Defaults to the stage of the current module. + - `enable_lambda_update` - (bool) - Whether to allow access to update lambda functions. Defaults to false. + EOT +} + +locals { + lambda_cicd_policy_enabled = contains(var.iam_policies, "lambda-cicd") + lambda_cicd_policy = local.lambda_cicd_policy_enabled ? one(data.aws_iam_policy_document.lambda_cicd_policy.*.json) : null + + lambda_bucket_arn = try(module.s3_artifacts_bucket[0].outputs.bucket_arn, null) +} + +module "s3_artifacts_bucket" { + count = lookup(var.lambda_cicd_policy_configuration, "enable_s3_access", false) ? 
1 : 0 + + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = lookup(var.lambda_cicd_policy_configuration, "s3_bucket_component_name", "s3-bucket/github-action-artifacts") + environment = lookup(var.lambda_cicd_policy_configuration, "s3_bucket_environment_name", module.this.environment) + tenant = lookup(var.lambda_cicd_policy_configuration, "s3_bucket_tenant_name", module.this.tenant) + stage = lookup(var.lambda_cicd_policy_configuration, "s3_bucket_stage_name", module.this.stage) + + context = module.this.context +} + +data "aws_iam_policy_document" "lambda_cicd_policy" { + count = local.lambda_cicd_policy_enabled ? 1 : 0 + + dynamic "statement" { + for_each = lookup(var.lambda_cicd_policy_configuration, "enable_kms_access", false) ? [1] : [] + content { + sid = "AllowKMSAccess" + effect = "Allow" + actions = [ + "kms:DescribeKey", + "kms:Encrypt", + ] + resources = [ + "*" + ] + } + } + + dynamic "statement" { + for_each = lookup(var.lambda_cicd_policy_configuration, "enable_ssm_access", false) ? [1] : [] + content { + effect = "Allow" + actions = [ + "ssm:GetParameter", + "ssm:GetParameters", + "ssm:GetParametersByPath", + "ssm:DescribeParameters", + "ssm:PutParameter" + ] + resources = [ + "arn:aws:ssm:*:*:parameter/lambda/*" + ] + } + } + + dynamic "statement" { + for_each = lookup(var.lambda_cicd_policy_configuration, "enable_s3_access", false) && local.lambda_bucket_arn != null ? [1] : [] + content { + effect = "Allow" + actions = [ + "s3:HeadObject", + "s3:GetObject", + "s3:PutObject", + "s3:ListBucket", + "s3:GetBucketLocation" + ] + resources = [ + local.lambda_bucket_arn, + ] + } + } + + dynamic "statement" { + for_each = lookup(var.lambda_cicd_policy_configuration, "enable_lambda_update", false) ? [1] : [] + content { + effect = "Allow" + actions = [ + "lambda:UpdateFunctionCode", + "lambda:UpdateFunctionConfiguration" + ] + resources = [ + "*" + ] + } + } +} diff --git a/modules/github-oidc-role/providers.tf b/modules/github-oidc-role/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/github-oidc-role/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/github-oidc-role/variables.tf b/modules/github-oidc-role/variables.tf new file mode 100644 index 000000000..29d0561f5 --- /dev/null +++ b/modules/github-oidc-role/variables.tf @@ -0,0 +1,56 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "iam_policies" { + type = list(string) + description = "List of policies to attach to the IAM role, should be either an ARN of an AWS Managed Policy or a name of a custom policy e.g. 
`gitops`" + default = [] +} + +variable "iam_policy" { + type = list(object({ + policy_id = optional(string, null) + version = optional(string, null) + statements = list(object({ + sid = optional(string, null) + effect = optional(string, null) + actions = optional(list(string), null) + not_actions = optional(list(string), null) + resources = optional(list(string), null) + not_resources = optional(list(string), null) + conditions = optional(list(object({ + test = string + variable = string + values = list(string) + })), []) + principals = optional(list(object({ + type = string + identifiers = list(string) + })), []) + not_principals = optional(list(object({ + type = string + identifiers = list(string) + })), []) + })) + })) + description = <<-EOT + IAM policy as list of Terraform objects, compatible with Terraform `aws_iam_policy_document` data source + except that `source_policy_documents` and `override_policy_documents` are not included. + Use inputs `iam_source_policy_documents` and `iam_override_policy_documents` for that. + EOT + default = [] + nullable = false +} + + +variable "github_actions_allowed_repos" { + type = list(string) + description = < [!TIP] +> +> We also have a similar component based on +> [actions-runner-controller](https://github.com/actions-runner-controller/actions-runner-controller) for Kubernetes. + +## Requirements + ## Usage **Stack Level**: Regional @@ -13,16 +27,16 @@ components: terraform: github-runners: vars: + cpu_utilization_high_threshold_percent: 5 + cpu_utilization_low_threshold_percent: 1 + default_cooldown: 300 github_scope: company instance_type: "t3.small" - min_size: 1 max_size: 10 - default_cooldown: 300 + min_size: 1 + runner_group: default scale_down_cooldown_seconds: 2700 wait_for_capacity_timeout: 10m - cpu_utilization_high_threshold_percent: 5 - cpu_utilization_low_threshold_percent: 1 - spot_maxprice: 0.02 mixed_instances_policy: instances_distribution: on_demand_allocation_strategy: "prioritized" @@ -60,13 +74,233 @@ components: Prior to deployment, the API Token must exist in SSM. -To generate the token, please follow [these instructions](https://cloudposse.atlassian.net/l/c/N4dH05ud). Once generated, write the API token to the SSM key store at the following location within the same AWS account and region where the GitHub Actions runner pool will reside. +To generate the token, please follow [these instructions](https://cloudposse.atlassian.net/l/c/N4dH05ud). Once +generated, write the API token to the SSM key store at the following location within the same AWS account and region +where the GitHub Actions runner pool will reside. ``` assume-role chamber write github/runners/ registration-token ghp_secretstring ``` +## Background + +### Registration + +Github Actions Self-Hosted runners can be scoped to the Github Organization, a Single Repository, or a group of +Repositories (Github Enterprise-Only). Upon startup, each runner uses a `REGISTRATION_TOKEN` to call the Github API to +register itself with the Organization, Repository, or Runner Group (Github Enterprise). 
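+
+Conceptually, the registration each runner instance performs at boot looks roughly like the sketch below. This is only
+an illustration, not the component's actual user data: the SSM parameter path, organization name, runner version, and
+labels are placeholders you would substitute with your own values.
+
+```bash
+# Hedged sketch only -- all values below are placeholders, not defaults of this component.
+# 1. Read the registration token that was written to SSM Parameter Store.
+REG_TOKEN=$(aws ssm get-parameter \
+  --name /github/runners/acme/registration-token \
+  --with-decryption \
+  --query 'Parameter.Value' \
+  --output text)
+
+# 2. Download the GitHub Actions runner agent, register it against the
+#    organization (or repository) URL using that token, then start it.
+curl -sSL -o actions-runner.tar.gz \
+  https://github.com/actions/runner/releases/download/v2.316.0/actions-runner-linux-x64-2.316.0.tar.gz
+tar xzf actions-runner.tar.gz
+./config.sh --unattended \
+  --url https://github.com/acme \
+  --token "${REG_TOKEN}" \
+  --labels self-hosted,linux
+./run.sh
+```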
+ +### Running Workflows + +Once a Self-Hosted runner is registered, you will have to update your workflow with the `runs-on` attribute specify it +should run on a self-hosted runner: + +``` +name: Test Self Hosted Runners +on: + push: + branches: [main] +jobs: + build: + runs-on: [self-hosted] +``` + +### Workflow Github Permissions (GITHUB_TOKEN) + +Each run of the Github Actions Workflow is assigned a GITHUB_TOKEN, which allows your workflow to perform actions +against Github itself such as cloning a repo, updating the checks API status, etc., and expires at the end of the +workflow run. The GITHUB_TOKEN has two permission "modes" it can operate in `Read and write permissions` ("Permissive" +or "Full Access") and `Read repository contents permission` ("Restricted" or "Read-Only"). By default, the GITHUB_TOKEN +is granted Full Access permissions, but you can change this via the Organization or Repo settings. If you opt for the +Read-Only permissions, you can optionally grant or revoke access to specific APIs via the workflow `yaml` file and a +full list of APIs that can be accessed can be found in the +[documentation](https://docs.github.com/en/actions/security-guides/automatic-token-authentication#permissions-for-the-github_token) +and is shown below in the table. It should be noted that the downside to this permissions model is that any user with +write access to the repository can escalate permissions for the workflow by updating the `yaml` file, however, the APIs +available via this token are limited. Most notably the GITHUB_TOKEN does not have access to the `users`, `repos`, +`apps`, `billing`, or `collaborators` APIs, so the tokens do not have access to modify sensitive settings or add/remove +users from the Organization/Repository. + +
+ +> Example of using escalated permissions for the entire workflow + +``` +name: Pull request labeler +on: [ pull_request_target ] +permissions: + contents: read + pull-requests: write +jobs: + triage: + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@v2 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} +``` + +> Example of using escalated permissions for a job + +``` +name: Create issue on commit +on: [ push ] +jobs: + create_commit: + runs-on: ubuntu-latest + permissions: + issues: write + steps: + - name: Create issue using REST API + run: | + curl --request POST \ + --url https://api.github.com/repos/${{ github.repository }}/issues \ + --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \ + --header 'content-type: application/json' \ + --data '{ + "title": "Automated issue for commit: ${{ github.sha }}", + "body": "This issue was automatically created by the GitHub Action workflow **${{ github.workflow }}**. \n\n The commit hash was: _${{ github.sha }}_." + }' \ + --fail +``` + +### Pre-Requisites for Using This Component + +In order to use this component, you will have to obtain the `REGISTRATION_TOKEN` mentioned above from your Github +Organization or Repository and store it in SSM Parameter store. In addition, it is recommended that you set the +permissions β€œmode” for Self-hosted runners to Read-Only. The instructions for doing both are below. + +#### Workflow Permissions + +1. Browse to + [https://github.com/organizations/{Org}/settings/actions](https://github.com/organizations/{Org}/settings/actions) + (Organization) or + [https://github.com/{Org}/{Repo}/settings/actions](https://github.com/{Org}/{Repo}/settings/actions) (Repository) + +2. Set the default permissions for the GITHUB_TOKEN to Read Only + +
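+
+If you prefer to avoid ClickOps, the same organization-wide default can usually be set through the GitHub REST API. The
+call below is a hedged sketch using the GitHub CLI (`gh`) with a placeholder organization name; it assumes a token with
+sufficient admin scope and should be verified against the current GitHub REST API documentation before use.
+
+```bash
+# Set the default GITHUB_TOKEN permissions for the "acme" organization to read-only.
+gh api --method PUT /orgs/acme/actions/permissions/workflow \
+  -f default_workflow_permissions=read \
+  -F can_approve_pull_request_reviews=false
+```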
+ +### Creating Registration Token + +> [!TIP] +> +> We highly recommend using a GitHub Application with the github-action-token-rotator module to generate the +> Registration Token. This will ensure that the token is rotated and that the token is stored in SSM Parameter Store +> encrypted with KMS. + +#### GitHub Application + +Follow the quickstart with the upstream module, +[cloudposse/terraform-aws-github-action-token-rotator](https://github.com/cloudposse/terraform-aws-github-action-token-rotator#quick-start), +or follow the steps below. + +1. Create a new GitHub App +1. Add the following permission: + +```diff +# Required Permissions for Repository Runners: +## Repository Permissions ++ Actions (read) ++ Administration (read / write) ++ Metadata (read) + +# Required Permissions for Organization Runners: +## Repository Permissions ++ Actions (read) ++ Metadata (read) + +## Organization Permissions ++ Self-hosted runners (read / write) +``` + +1. Generate a Private Key + +If you are working with Cloud Posse, upload this Private Key, GitHub App ID, and Github App Installation ID to 1Password +and skip the rest. Otherwise, complete the private key setup in `core--auto`. + +1. Convert the private key to a PEM file using the following command: + `openssl pkcs8 -topk8 -inform PEM -outform PEM -nocrypt -in {DOWNLOADED_FILE_NAME}.pem -out private-key-pkcs8.key` +1. Upload PEM file key to the specified ssm path: `/github/runners/acme/private-key` in `core--auto` +1. Create another sensitive SSM parameter `/github/runners/acme/registration-token` in `core--auto` with + any basic value, such as "foo". This will be overwritten by the rotator. +1. Update the GitHub App ID and Installation ID in the `github-action-token-rotator` catalog. + +> [!TIP] +> +> If you change the Private Key saved in SSM, redeploy `github-action-token-rotator` + +#### (ClickOps) Obtain the Runner Registration Token + +1. Browse to + [https://github.com/organizations/{Org}/settings/actions/runners](https://github.com/organizations/{Org}/settings/actions/runners) + (Organization) or + [https://github.com/{Org}/{Repo}/settings/actions/runners](https://github.com/{Org}/{Repo}/settings/actions/runners) + (Repository) + +2. Click the **New Runner** button (Organization) or **New Self Hosted Runner** button (Repository) + +3. Copy the Github Runner token from the next screen. Note that this is the only time you will see this token. Note that + if you exit the `New {Self Hosted} Runner` screen and then later return by clicking the `New {Self Hosted} Runner` + button again, the registration token will be invalidated and a new token will be generated. + +
+
+4. Add the `REGISTRATION_TOKEN` to the `/github/token` SSM parameter in the account where Github runners are hosted
+   (usually `automation`), encrypted with KMS.
+
+```bash
+chamber write github token
+```
+
+# FAQ
+
+## The GitHub Registration Token is not updated in SSM
+
+The `github-action-token-rotator` runs an AWS Lambda function every 30 minutes. This lambda will attempt to use a
+private key in its environment configuration to generate a GitHub Registration Token, and then store that token in AWS
+SSM Parameter Store.
+
+If the GitHub Registration Token parameter, `/github/runners/acme/registration-token`, is not updated, read through the
+following tips:
+
+1. The private key is stored at the given parameter path:
+   `parameter_store_private_key_path: /github/runners/acme/private-key`
+1. The private key is Base64 encoded. If you pull the key from SSM and decode it, it should begin with
+   `-----BEGIN PRIVATE KEY-----`
+1. If the private key has changed, you must _redeploy_ `github-action-token-rotator`. Run a plan against the component
+   to make sure there are no changes required.
+
+## The GitHub Registration Token is valid, but the Runners are not registering with GitHub
+
+If you initially deployed the `github-action-token-rotator` component with an invalid configuration and then
+deployed the `github-runners` component, the instance runners will have failed to register with GitHub.
+
+After you correct `github-action-token-rotator` and have a valid GitHub Registration Token in SSM, _destroy and
+recreate_ the `github-runners` component.
+
+If you cannot see the runners registered in GitHub, check the system logs on one of the EC2 instances in AWS in
+`core--auto`.
+
+## I cannot assume the role from GitHub Actions after deploying
+
+The following error is very common if the GitHub workflow is missing the proper permissions.
+
+```bash
+Error: User: arn:aws:sts::***:assumed-role/acme-core-use1-auto-actions-runner@actions-runner-system/token-file-web-identity is not authorized to perform: sts:TagSession on resource: arn:aws:iam::999999999999:role/acme-plat-use1-dev-gha
+```
+
+In order to use a web identity, GitHub Action pipelines must have the following permissions. See
+[GitHub Action documentation for more](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services#adding-permissions-settings).
+ +```yaml +permissions: + id-token: write # This is required for requesting the JWT + contents: read # This is required for actions/checkout +``` + + ## Requirements @@ -87,13 +321,13 @@ chamber write github/runners/ registration-token ghp_secretstring | Name | Source | Version | |------|--------|---------| -| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.1.1 | -| [autoscale\_group](#module\_autoscale\_group) | cloudposse/ec2-autoscale-group/aws | 0.30.1 | +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [autoscale\_group](#module\_autoscale\_group) | cloudposse/ec2-autoscale-group/aws | 0.35.1 | | [graceful\_scale\_in](#module\_graceful\_scale\_in) | ./modules/graceful_scale_in | n/a | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [sg](#module\_sg) | cloudposse/security-group/aws | 1.0.1 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.1.1 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -136,8 +370,6 @@ chamber write github/runners/ registration-token ghp_secretstring | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [github\_scope](#input\_github\_scope) | Scope of the runner (e.g. `cloudposse/example` for repo or `cloudposse` for org) | `string` | n/a | yes | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [instance\_type](#input\_instance\_type) | Default instance type for the action runner. | `string` | `"m5.large"` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | @@ -151,6 +383,7 @@ chamber write github/runners/ registration-token ghp_secretstring | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no |
 | [region](#input\_region) | AWS Region | `string` | n/a | yes |
+| [runner\_group](#input\_runner\_group) | GitHub runner group | `string` | `"default"` | no |
 | [runner\_labels](#input\_runner\_labels) | List of labels to add to the GitHub Runner (e.g. 'Amazon Linux 2'). | `list(string)` | `[]` | no |
 | [runner\_role\_additional\_policy\_arns](#input\_runner\_role\_additional\_policy\_arns) | List of policy ARNs that will be attached to the runners' default role on creation in addition to the defaults | `list(string)` | `[]` | no |
 | [runner\_version](#input\_runner\_version) | GitHub runner release version | `string` | `"2.288.1"` | no |
@@ -177,8 +410,95 @@ chamber write github/runners/ registration-token ghp_secretstring
 | [iam\_role\_arn](#output\_iam\_role\_arn) | The ARN of the IAM role associated with the Autoscaling Group |
 | [ssm\_document\_arn](#output\_ssm\_document\_arn) | The ARN of the SSM document. |
+
+
+## FAQ
+
+### Can we scope it to a GitHub org with both private and public repos?
+
+Yes, but this requires GitHub Enterprise Cloud and the use of runner groups to scope permissions of runners to specific
+repos. If you set the scope to the entire org without runner groups and the org has both public and private repos, the
+self-hosted runners are exposed to workflows in the public repos, which is a security vulnerability.
+
+[https://docs.github.com/en/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups](https://docs.github.com/en/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups)
+
+If you do not have GitHub Enterprise Cloud and runner groups cannot be utilized, then it's best to create new GitHub
+runners per repo, or use the summerwind actions-runner-controller via a GitHub App to set the scope to specific repos.
+
+### How can we see the current spot pricing?
+
+Go to [ec2instances.info](http://ec2instances.info/)
+
+### If we don't use mixed at all, does that mean we can't do spot?
+
+It's possible to do spot without using mixed instances, but you leave yourself open to zero instance availability with a
+single instance type.
+
+For example, if you wanted to use spot with `t3.xlarge` in `us-east-2` and, for some reason, AWS ran out of
+`t3.xlarge`, you wouldn't have the option to choose another instance type, and so all the GitHub Action runs would stall
+until availability returned. If you use on-demand pricing, it's more expensive, but you're more likely to get scheduling
+priority. For guaranteed availability, reserved instances are required.
+
+### Do the overrides apply to both the on-demand and the spot instances, or only the spot instances?
+
+Since the overrides affect the launch template, they should apply to both on-demand and spot instances, because
+weighted capacity can be set for either. The override Terraform option is on the ASG's `launch_template`:
+
+> List of nested arguments provides the ability to specify multiple instance types. This will override the same
+> parameter in the launch template. For on-demand instances, Auto Scaling considers the order of preference of instance
+> types to launch based on the order specified in the overrides list. Defined below.
+
+And in the Terraform resource documentation for `instances_distribution`:
+
+> `spot_max_price` - (Optional) Maximum price per unit hour that the user is willing to pay for the Spot instances. 
+> Default: an empty string which means the on-demand price. For a `mixed_instances_policy`, this will do purely +> on-demand + +``` + mixed_instances_policy: + instances_distribution: + on_demand_allocation_strategy: "prioritized" + on_demand_base_capacity: 1 + on_demand_percentage_above_base_capacity: 0 + spot_allocation_strategy: "capacity-optimized" + spot_instance_pools: null + spot_max_price: [] +``` + +This will always do spot unless instances are unavailable, then switch to on-demand. + +``` + mixed_instances_policy: + instances_distribution: + # ... + spot_max_price: 0.05 +``` + +If you want a single instance type, you could still use the mixed instances policy to define that like above, or you can +use these other inputs and comment out the `mixed_instances_policy` + +``` + instance_type: "t3.xlarge" + # the below is optional in order to set the spot max price + instance_market_options: + market_type = "spot" + spot_options: + block_duration_minutes: 6000 + instance_interruption_behavior: terminate + max_price: 0.05 + spot_instance_type = persistent + valid_until: null +``` + +The `overrides` will override the `instance_type` above. ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/github-runners) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/github-runners) - + Cloud Posse's upstream component +- [AWS: Auto Scaling groups with multiple instance types and purchase options](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html) +- [InstancesDistribution](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_InstancesDistribution.html) + +* [MixedInstancesPolicy](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_MixedInstancesPolicy.html) +* [Terraform ASG `Override` Attribute](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group#override) [](https://cpco.io/component) diff --git a/modules/github-runners/default.auto.tfvars b/modules/github-runners/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/github-runners/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/github-runners/main.tf b/modules/github-runners/main.tf index faab3f2c2..d7659f151 100644 --- a/modules/github-runners/main.tf +++ b/modules/github-runners/main.tf @@ -73,6 +73,7 @@ data "cloudinit_config" "config" { pre_install = var.userdata_pre_install post_install = var.userdata_post_install runner_version = var.runner_version + runner_group = var.runner_group }) } } @@ -106,7 +107,7 @@ module "sg" { module "autoscale_group" { source = "cloudposse/ec2-autoscale-group/aws" - version = "0.30.1" + version = "0.35.1" image_id = join("", data.aws_ami.runner.*.id) instance_type = var.instance_type diff --git a/modules/github-runners/modules/graceful_scale_in/outputs.tf b/modules/github-runners/modules/graceful_scale_in/outputs.tf index fd24132a8..a87e00fa0 100644 --- a/modules/github-runners/modules/graceful_scale_in/outputs.tf +++ b/modules/github-runners/modules/graceful_scale_in/outputs.tf @@ -16,4 +16,4 @@ output "autoscaling_lifecycle_hook_name" { output "ssm_document_arn" { description = "The ARN of the SSM document." 
value = join("", aws_ssm_document.default.*.arn) -} \ No newline at end of file +} diff --git a/modules/github-runners/modules/graceful_scale_in/variables.tf b/modules/github-runners/modules/graceful_scale_in/variables.tf index ac97b5568..1aee59ef2 100644 --- a/modules/github-runners/modules/graceful_scale_in/variables.tf +++ b/modules/github-runners/modules/graceful_scale_in/variables.tf @@ -6,4 +6,4 @@ variable "autoscaling_group_name" { variable "command" { description = "Command to run on EC2 instance shutdown." type = string -} \ No newline at end of file +} diff --git a/modules/github-runners/providers.tf b/modules/github-runners/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/github-runners/providers.tf +++ b/modules/github-runners/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/github-runners/remote-state.tf b/modules/github-runners/remote-state.tf index 22923da60..e96cf2bec 100644 --- a/modules/github-runners/remote-state.tf +++ b/modules/github-runners/remote-state.tf @@ -1,6 +1,6 @@ module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.1.1" + version = "1.5.0" component = "vpc" @@ -9,7 +9,7 @@ module "vpc" { module "account_map" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.1.1" + version = "1.5.0" component = "account-map" environment = var.account_map_environment_name diff --git a/modules/github-runners/templates/amazon-cloudwatch-agent.json b/modules/github-runners/templates/amazon-cloudwatch-agent.json index 2372a61d8..d6e5d2e16 100644 --- a/modules/github-runners/templates/amazon-cloudwatch-agent.json +++ b/modules/github-runners/templates/amazon-cloudwatch-agent.json @@ -134,4 +134,4 @@ }, "force_flush_interval": 15 } -} \ No newline at end of file +} diff --git a/modules/github-runners/templates/user-data.sh b/modules/github-runners/templates/user-data.sh index 440b6b86d..d640d1acf 100644 --- a/modules/github-runners/templates/user-data.sh +++ b/modules/github-runners/templates/user-data.sh @@ -64,4 +64,4 @@ export USER=root ${post_install} ls -la /tmp -/tmp/create-latest-svc.sh ${github_scope} "" $NODE_NAME $USER $LABELS +/tmp/create-latest-svc.sh ${github_scope} "" $NODE_NAME $USER $LABELS ${runner_group} diff --git a/modules/github-runners/variables.tf b/modules/github-runners/variables.tf index 52a874e8e..864901c9a 100644 --- a/modules/github-runners/variables.tf +++ b/modules/github-runners/variables.tf @@ 
-137,6 +137,12 @@ variable "runner_labels" { description = "List of labels to add to the GitHub Runner (e.g. 'Amazon Linux 2')." } +variable "runner_group" { + type = string + default = "default" + description = "GitHub runner group" +} + variable "runner_role_additional_policy_arns" { type = list(string) default = [] diff --git a/modules/github-webhook/README.md b/modules/github-webhook/README.md new file mode 100644 index 000000000..de07f1b17 --- /dev/null +++ b/modules/github-webhook/README.md @@ -0,0 +1,161 @@ +--- +tags: + - component/github-webhook + - layer/software-delivery + - provider/aws +--- + +# Component: `github-webhook` + +This component provisions a GitHub webhook for a single GitHub repository. + +You may want to use this component if you are provisioning webhooks for multiple ArgoCD deployment repositories across +GitHub organizations. + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. This example pulls the value of the webhook from `remote-state` + +```yaml +components: + terraform: + webhook/cloudposse/argocd: + metadata: + component: github-webhook + vars: + github_organization: cloudposse + github_repository: argocd-deploy-non-prod + webhook_url: "https://argocd.ue2.dev.plat.cloudposse.org/api/webhook" + + remote_state_github_webhook_enabled: true # default value added for visibility + remote_state_component_name: eks/argocd +``` + +### SSM Stored Value Example + +Here's an example snippet for how to use this component with a value stored in SSM + +```yaml +components: + terraform: + webhook/cloudposse/argocd: + metadata: + component: github-webhook + vars: + github_organization: cloudposse + github_repository: argocd-deploy-non-prod + webhook_url: "https://argocd.ue2.dev.plat.cloudposse.org/api/webhook" + + remote_state_github_webhook_enabled: false + ssm_github_webhook_enabled: true + ssm_github_webhook: "/argocd/github/webhook" +``` + +### Input Value Example + +Here's an example snippet for how to use this component with a value stored in Terraform variables. + +```yaml +components: + terraform: + webhook/cloudposse/argocd: + metadata: + component: github-webhook + vars: + github_organization: cloudposse + github_repository: argocd-deploy-non-prod + webhook_url: "https://argocd.ue2.dev.plat.cloudposse.org/api/webhook" + + remote_state_github_webhook_enabled: false + ssm_github_webhook_enabled: false + webhook_github_secret: "abcdefg" +``` + +### ArgoCD Webhooks + +For usage with the `eks/argocd` component, see +[Creating Webhooks with `github-webhook`](https://github.com/cloudposse/terraform-aws-components/blob/main/modules/eks/argocd/README.md#creating-webhooks-with-github-webhook) +in that component's README. 
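+
+### Seeding the SSM Webhook Secret
+
+When `ssm_github_webhook_enabled` is `true`, the webhook secret must already exist in SSM Parameter Store in the same
+account and region. A minimal sketch of seeding it with the AWS CLI, assuming the `/argocd/github/webhook` path from the
+example above (the secret value itself is arbitrary, but it must match whatever the receiving endpoint expects):
+
+```bash
+# Generate a random shared secret and store it at the path the component will read.
+aws ssm put-parameter \
+  --name "/argocd/github/webhook" \
+  --type "SecureString" \
+  --value "$(openssl rand -hex 20)" \
+  --overwrite
+```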
+ + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [github](#requirement\_github) | >= 4.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | +| [github](#provider\_github) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [source](#module\_source) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [github_repository_webhook.default](https://registry.terraform.io/providers/integrations/github/latest/docs/resources/repository_webhook) | resource | +| [aws_ssm_parameter.github_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.webhook](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [github\_base\_url](#input\_github\_base\_url) | This is the target GitHub base API endpoint. Providing a value is a requirement when working with GitHub Enterprise. It is optional to provide this value and it can also be sourced from the `GITHUB_BASE_URL` environment variable. The value must end with a slash, for example: `https://terraformtesting-ghe.westus.cloudapp.azure.com/` | `string` | `null` | no | +| [github\_organization](#input\_github\_organization) | The name of the GitHub Organization where the repository lives | `string` | n/a | yes | +| [github\_repository](#input\_github\_repository) | The name of the GitHub repository where the webhook will be created | `string` | n/a | yes | +| [github\_token\_override](#input\_github\_token\_override) | Use the value of this variable as the GitHub token instead of reading it from SSM | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region. | `string` | n/a | yes | +| [remote\_state\_component\_name](#input\_remote\_state\_component\_name) | If fetching the Github Webhook value from remote-state, set this to the source component name. For example, `eks/argocd`. | `string` | `""` | no | +| [remote\_state\_github\_webhook\_enabled](#input\_remote\_state\_github\_webhook\_enabled) | If `true`, pull the GitHub Webhook value from remote-state | `bool` | `true` | no | +| [ssm\_github\_api\_key](#input\_ssm\_github\_api\_key) | SSM path to the GitHub API key | `string` | `"/argocd/github/api_key"` | no | +| [ssm\_github\_webhook](#input\_ssm\_github\_webhook) | Format string of the SSM parameter path where the webhook will be pulled from. Only used if `var.webhook_github_secret` is not given. | `string` | `"/github/webhook"` | no | +| [ssm\_github\_webhook\_enabled](#input\_ssm\_github\_webhook\_enabled) | If `true`, pull the GitHub Webhook value from AWS SSM Parameter Store using `var.ssm_github_webhook` | `bool` | `false` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [webhook\_github\_secret](#input\_webhook\_github\_secret) | The value to use as the GitHub webhook secret. Set both `var.ssm_github_webhook_enabled` and `var.remote_state_github_webhook_enabled` to `false` in order to use this value | `string` | `""` | no | +| [webhook\_url](#input\_webhook\_url) | The URL for the webhook | `string` | n/a | yes | + +## Outputs + +No outputs. + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components) - Cloud Posse's upstream + components + +[](https://cpco.io/component) diff --git a/modules/github-webhook/context.tf b/modules/github-webhook/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/github-webhook/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/github-webhook/main.tf b/modules/github-webhook/main.tf new file mode 100644 index 000000000..130a49df4 --- /dev/null +++ b/modules/github-webhook/main.tf @@ -0,0 +1,33 @@ +locals { + enabled = module.this.enabled + + remote_state_github_webhook_enabled = local.enabled && var.remote_state_github_webhook_enabled + ssm_github_webhook_enabled = local.enabled && var.ssm_github_webhook_enabled + + # If remote_state_github_webhook_enabled, get the value from remote-state + # Else if ssm_github_webhook_enabled, get the value from SSM + # Else, get the value given by var.webhook_github_secret + webhook_github_secret = local.remote_state_github_webhook_enabled ? module.source[0].outputs.github_webhook_value : (local.ssm_github_webhook_enabled ? try(data.aws_ssm_parameter.webhook[0].value, null) : var.webhook_github_secret) +} + +data "aws_ssm_parameter" "webhook" { + count = local.ssm_github_webhook_enabled ? 1 : 0 + + name = var.ssm_github_webhook + with_decryption = true +} + +resource "github_repository_webhook" "default" { + repository = var.github_repository + + configuration { + url = var.webhook_url + content_type = "json" + secret = local.webhook_github_secret + insecure_ssl = false + } + + active = true + + events = ["push"] +} diff --git a/modules/github-webhook/provider-github.tf b/modules/github-webhook/provider-github.tf new file mode 100644 index 000000000..8690b6a26 --- /dev/null +++ b/modules/github-webhook/provider-github.tf @@ -0,0 +1,33 @@ +variable "github_base_url" { + type = string + description = "This is the target GitHub base API endpoint. Providing a value is a requirement when working with GitHub Enterprise. It is optional to provide this value and it can also be sourced from the `GITHUB_BASE_URL` environment variable. 
The value must end with a slash, for example: `https://terraformtesting-ghe.westus.cloudapp.azure.com/`" + default = null +} + +variable "ssm_github_api_key" { + type = string + description = "SSM path to the GitHub API key" + default = "/argocd/github/api_key" +} + +variable "github_token_override" { + type = string + description = "Use the value of this variable as the GitHub token instead of reading it from SSM" + default = null +} + +locals { + github_token = local.enabled ? coalesce(var.github_token_override, try(data.aws_ssm_parameter.github_api_key[0].value, null)) : "" +} + +data "aws_ssm_parameter" "github_api_key" { + count = local.enabled ? 1 : 0 + name = var.ssm_github_api_key + with_decryption = true +} + +provider "github" { + base_url = local.enabled ? var.github_base_url : null + owner = local.enabled ? var.github_organization : null + token = local.enabled ? local.github_token : null +} diff --git a/modules/github-webhook/providers.tf b/modules/github-webhook/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/github-webhook/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/github-webhook/remote-state.tf b/modules/github-webhook/remote-state.tf new file mode 100644 index 000000000..a2a7dab14 --- /dev/null +++ b/modules/github-webhook/remote-state.tf @@ -0,0 +1,12 @@ +# This can be any component that has the required output, `github-webhook-value` +# This is typically eks/argocd +module "source" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + count = local.remote_state_github_webhook_enabled ? 1 : 0 + + component = var.remote_state_component_name + + context = module.this.context +} diff --git a/modules/github-webhook/variables.tf b/modules/github-webhook/variables.tf new file mode 100644 index 000000000..280bf31d5 --- /dev/null +++ b/modules/github-webhook/variables.tf @@ -0,0 +1,49 @@ +variable "region" { + description = "AWS Region." + type = string +} + +variable "github_repository" { + type = string + description = "The name of the GitHub repository where the webhook will be created" +} + +variable "github_organization" { + type = string + description = "The name of the GitHub Organization where the repository lives" +} + +variable "webhook_url" { + type = string + description = "The URL for the webhook" +} + +variable "webhook_github_secret" { + type = string + description = "The value to use as the GitHub webhook secret. Set both `var.ssm_github_webhook_enabled` and `var.remote_state_github_webhook_enabled` to `false` in order to use this value" + default = "" +} + +variable "ssm_github_webhook_enabled" { + type = bool + description = "If `true`, pull the GitHub Webhook value from AWS SSM Parameter Store using `var.ssm_github_webhook`" + default = false +} + +variable "ssm_github_webhook" { + type = string + description = "Format string of the SSM parameter path where the webhook will be pulled from. 
Only used if `var.webhook_github_secret` is not given." + default = "/github/webhook" +} + +variable "remote_state_github_webhook_enabled" { + type = bool + description = "If `true`, pull the GitHub Webhook value from remote-state" + default = true +} + +variable "remote_state_component_name" { + type = string + description = "If fetching the Github Webhook value from remote-state, set this to the source component name. For example, `eks/argocd`." + default = "" +} diff --git a/modules/github-webhook/versions.tf b/modules/github-webhook/versions.tf new file mode 100644 index 000000000..64cf5005b --- /dev/null +++ b/modules/github-webhook/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + github = { + source = "integrations/github" + version = ">= 4.0" + } + } +} diff --git a/modules/global-accelerator-endpoint-group/README.md b/modules/global-accelerator-endpoint-group/README.md index 88bd603fd..8e434bfcd 100644 --- a/modules/global-accelerator-endpoint-group/README.md +++ b/modules/global-accelerator-endpoint-group/README.md @@ -1,12 +1,20 @@ -# Component: `global-accelerator` +--- +tags: + - component/global-accelerator-endpoint-group + - layer/unassigned + - provider/aws +--- + +# Component: `global-accelerator-endpoint-group` This component is responsible for provisioning a Global Accelerator Endpoint Group. -This component assumes that the `global-accelerator` component has already been deployed to the same account in the environment specified by `var.global_accelerator_environment_name`. +This component assumes that the `global-accelerator` component has already been deployed to the same account in the +environment specified by `var.global_accelerator_environment_name`. ## Usage -**Stack Level**: Regional +**Stack Level**: Regional Here are some example snippets for how to use this component: @@ -21,13 +29,14 @@ components: - endpoint_lb_name: my-load-balancer ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | ## Providers @@ -38,7 +47,7 @@ No providers. | Name | Source | Version | |------|--------|---------| | [endpoint\_group](#module\_endpoint\_group) | cloudposse/global-accelerator/aws//modules/endpoint-group | 0.5.0 | -| [global\_accelerator](#module\_global\_accelerator) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [global\_accelerator](#module\_global\_accelerator) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -60,8 +69,6 @@ No resources. | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [global\_accelerator\_environment\_name](#input\_global\_accelerator\_environment\_name) | The name of the environment where the global component `global_accelerator` is provisioned | `string` | `"gbl"` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -80,10 +87,11 @@ No resources. |------|-------------| | [id](#output\_id) | The ID of the Global Accelerator Endpoint Group. | - + ## References - * [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/global-accelerator-endpoint-group) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/global-accelerator-endpoint-group) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/global-accelerator-endpoint-group/default.auto.tfvars b/modules/global-accelerator-endpoint-group/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/global-accelerator-endpoint-group/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/global-accelerator-endpoint-group/providers.tf b/modules/global-accelerator-endpoint-group/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/global-accelerator-endpoint-group/providers.tf +++ b/modules/global-accelerator-endpoint-group/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/global-accelerator-endpoint-group/remote-state.tf b/modules/global-accelerator-endpoint-group/remote-state.tf index ca7d88eb2..2ce29033d 100644 --- a/modules/global-accelerator-endpoint-group/remote-state.tf +++ b/modules/global-accelerator-endpoint-group/remote-state.tf @@ -1,6 +1,6 @@ module "global_accelerator" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = "global-accelerator" environment = var.global_accelerator_environment_name diff --git a/modules/global-accelerator-endpoint-group/versions.tf b/modules/global-accelerator-endpoint-group/versions.tf index e89eb16ed..f33ede77f 100644 --- a/modules/global-accelerator-endpoint-group/versions.tf +++ b/modules/global-accelerator-endpoint-group/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } } } diff --git a/modules/global-accelerator/README.md b/modules/global-accelerator/README.md index eadacb491..7fd22f2f7 100644 --- a/modules/global-accelerator/README.md +++ b/modules/global-accelerator/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/global-accelerator + - layer/unassigned + - provider/aws +--- + # Component: `global-accelerator` This component is responsible for provisioning AWS Global Accelerator and its listeners. @@ -25,13 +32,14 @@ global-accelerator: to_port: 443 ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | ## Providers @@ -41,7 +49,7 @@ No providers. | Name | Source | Version | |------|--------|---------| -| [flow\_logs\_bucket](#module\_flow\_logs\_bucket) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [flow\_logs\_bucket](#module\_flow\_logs\_bucket) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [global\_accelerator](#module\_global\_accelerator) | cloudposse/global-accelerator/aws | 0.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -68,8 +76,6 @@ No resources. | [flow\_logs\_s3\_bucket\_tenant](#input\_flow\_logs\_s3\_bucket\_tenant) | The tenant where the S3 Bucket for the Accelerator Flow Logs exists. Required if `var.flow_logs_enabled` is set to `true`. | `string` | `null` | no | | [flow\_logs\_s3\_prefix](#input\_flow\_logs\_s3\_prefix) | The Object Prefix within the S3 Bucket for the Accelerator Flow Logs. Required if `var.flow_logs_enabled` is set to `true`. | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -92,10 +98,11 @@ No resources. | [name](#output\_name) | Name of the Global Accelerator. | | [static\_ips](#output\_static\_ips) | Global Static IPs owned by the Global Accelerator. | - + ## References - * [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/global-accelerator) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/global-accelerator) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/global-accelerator/default.auto.tfvars b/modules/global-accelerator/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/global-accelerator/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/global-accelerator/providers.tf b/modules/global-accelerator/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/global-accelerator/providers.tf +++ b/modules/global-accelerator/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/global-accelerator/remote-state.tf b/modules/global-accelerator/remote-state.tf index 5db291bff..110ebf383 100644 --- a/modules/global-accelerator/remote-state.tf +++ b/modules/global-accelerator/remote-state.tf @@ -2,7 +2,7 @@ module "flow_logs_bucket" { count = var.flow_logs_enabled ? 1 : 0 source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = var.flow_logs_s3_bucket_component tenant = var.flow_logs_s3_bucket_tenant diff --git a/modules/global-accelerator/versions.tf b/modules/global-accelerator/versions.tf index e89eb16ed..f33ede77f 100644 --- a/modules/global-accelerator/versions.tf +++ b/modules/global-accelerator/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } } } diff --git a/modules/glue/catalog-database/README.md b/modules/glue/catalog-database/README.md new file mode 100644 index 000000000..1d5230a51 --- /dev/null +++ b/modules/glue/catalog-database/README.md @@ -0,0 +1,113 @@ +--- +tags: + - component/glue/catalog-database + - layer/unassigned + - provider/aws +--- + +# Component: `glue/catalog-database` + +This component provisions Glue catalog databases. 
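+
+Other components can consume the catalog database created here through this component's outputs and the
+`remote-state` module (the `glue/catalog-table` component later in this changeset does exactly that). A minimal
+sketch, assuming the component instance name `glue/catalog-database/example` from the usage example below:
+
+```hcl
+module "glue_catalog_database" {
+  source  = "cloudposse/stack-config/yaml//modules/remote-state"
+  version = "1.5.0"
+
+  # Hypothetical instance name; it must match the component name used in the stack configuration
+  component = "glue/catalog-database/example"
+
+  context = module.this.context
+}
+
+# The database name is exposed via this component's `catalog_database_name` output
+locals {
+  database_name = module.glue_catalog_database.outputs.catalog_database_name
+}
+```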
+ +## Usage + +**Stack Level**: Regional + +```yaml +components: + terraform: + glue/catalog-database/example: + metadata: + component: glue/catalog-database + vars: + enabled: true + name: example + catalog_database_description: Glue catalog database example + location_uri: "s3://awsglue-datasets/examples/medicare/Medicare_Hospital_Provider.csv" + glue_iam_component_name: "glue/iam" + lakeformation_permissions_enabled: true + lakeformation_permissions: + - "ALL" +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [utils](#requirement\_utils) | >= 1.15.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [glue\_catalog\_database](#module\_glue\_catalog\_database) | cloudposse/glue/aws//modules/glue-catalog-database | 0.4.0 | +| [glue\_iam\_role](#module\_glue\_iam\_role) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_lakeformation_permissions.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lakeformation_permissions) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [catalog\_database\_description](#input\_catalog\_database\_description) | Glue catalog database description | `string` | `null` | no | +| [catalog\_database\_name](#input\_catalog\_database\_name) | Glue catalog database name. The acceptable characters are lowercase letters, numbers, and the underscore character | `string` | `null` | no | +| [catalog\_id](#input\_catalog\_id) | ID of the Glue Catalog to create the database in. If omitted, this defaults to the AWS Account ID | `string` | `null` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [create\_table\_default\_permission](#input\_create\_table\_default\_permission) | Creates a set of default permissions on the table for principals | `any` | `null` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [glue\_iam\_component\_name](#input\_glue\_iam\_component\_name) | Glue IAM component name. Used to get the Glue IAM role from the remote state | `string` | `"glue/iam"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [lakeformation\_permissions](#input\_lakeformation\_permissions) | List of permissions granted to the principal. Refer to https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html for more details | `list(string)` |
[
"ALL"
]
| no | +| [lakeformation\_permissions\_enabled](#input\_lakeformation\_permissions\_enabled) | Whether to enable adding Lake Formation permissions to the IAM role that is used to access the Glue database | `bool` | `true` | no | +| [location\_uri](#input\_location\_uri) | Location of the database (for example, an HDFS path) | `string` | `null` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [parameters](#input\_parameters) | Map of key-value pairs that define parameters and properties of the database | `map(string)` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [target\_database](#input\_target\_database) | Configuration block for a target database for resource linking |
object({
# If `target_database` is provided (not `null`), all these fields are required
catalog_id = string
database_name = string
})
| `null` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [catalog\_database\_arn](#output\_catalog\_database\_arn) | Catalog database ARN | +| [catalog\_database\_id](#output\_catalog\_database\_id) | Catalog database ID | +| [catalog\_database\_name](#output\_catalog\_database\_name) | Catalog database name | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/glue/catalog-database) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/glue/catalog-database/context.tf b/modules/glue/catalog-database/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/glue/catalog-database/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. 
+ See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. 
+ EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/glue/catalog-database/main.tf b/modules/glue/catalog-database/main.tf new file mode 100644 index 000000000..98156179f --- /dev/null +++ b/modules/glue/catalog-database/main.tf @@ -0,0 +1,30 @@ +module "glue_catalog_database" { + source = "cloudposse/glue/aws//modules/glue-catalog-database" + version = "0.4.0" + + catalog_database_name = var.catalog_database_name + catalog_database_description = var.catalog_database_description + catalog_id = var.catalog_id + create_table_default_permission = var.create_table_default_permission + location_uri = var.location_uri + parameters = var.parameters + target_database = var.target_database + + context = module.this.context +} + +# Grant Lake Formation permissions to the Glue IAM role that is used to access the Glue database. +# This prevents the error: +# Error: error creating Glue crawler: InvalidInputException: Insufficient Lake Formation permission(s) on > (Service: AmazonDataCatalog; Status Code: 400; Error Code: AccessDeniedException +# https://aws.amazon.com/premiumsupport/knowledge-center/glue-insufficient-lakeformation-permissions +# https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lakeformation_permissions +resource "aws_lakeformation_permissions" "default" { + count = module.this.enabled && var.lakeformation_permissions_enabled ? 1 : 0 + + principal = module.glue_iam_role.outputs.role_arn + permissions = var.lakeformation_permissions + + database { + name = module.glue_catalog_database.name + } +} diff --git a/modules/glue/catalog-database/outputs.tf b/modules/glue/catalog-database/outputs.tf new file mode 100644 index 000000000..29e423401 --- /dev/null +++ b/modules/glue/catalog-database/outputs.tf @@ -0,0 +1,14 @@ +output "catalog_database_id" { + description = "Catalog database ID" + value = module.glue_catalog_database.id +} + +output "catalog_database_name" { + description = "Catalog database name" + value = module.glue_catalog_database.name +} + +output "catalog_database_arn" { + description = "Catalog database ARN" + value = module.glue_catalog_database.arn +} diff --git a/modules/glue/catalog-database/providers.tf b/modules/glue/catalog-database/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/glue/catalog-database/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/glue/catalog-database/remote-state.tf b/modules/glue/catalog-database/remote-state.tf new file mode 100644 index 000000000..040455b66 --- /dev/null +++ b/modules/glue/catalog-database/remote-state.tf @@ -0,0 +1,8 @@ +module "glue_iam_role" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.glue_iam_component_name + + context = module.this.context +} diff --git a/modules/glue/catalog-database/variables.tf b/modules/glue/catalog-database/variables.tf new file mode 100644 index 000000000..e1d8e9c32 --- /dev/null +++ b/modules/glue/catalog-database/variables.tf @@ -0,0 +1,74 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "catalog_database_name" { + type = string + description = "Glue catalog database name. The acceptable characters are lowercase letters, numbers, and the underscore character" + default = null +} + +variable "catalog_database_description" { + type = string + description = "Glue catalog database description" + default = null +} + +variable "catalog_id" { + type = string + description = "ID of the Glue Catalog to create the database in. If omitted, this defaults to the AWS Account ID" + default = null +} + +variable "create_table_default_permission" { + # type = object({ + # permissions = list(string) + # principal = object({ + # data_lake_principal_identifier = string + # }) + # }) + type = any + description = "Creates a set of default permissions on the table for principals" + default = null +} + +variable "location_uri" { + type = string + description = "Location of the database (for example, an HDFS path)" + default = null +} + +variable "parameters" { + type = map(string) + description = "Map of key-value pairs that define parameters and properties of the database" + default = null +} + +variable "target_database" { + type = object({ + # If `target_database` is provided (not `null`), all these fields are required + catalog_id = string + database_name = string + }) + description = " Configuration block for a target database for resource linking" + default = null +} + +variable "glue_iam_component_name" { + type = string + description = "Glue IAM component name. Used to get the Glue IAM role from the remote state" + default = "glue/iam" +} + +variable "lakeformation_permissions_enabled" { + type = bool + description = "Whether to enable adding Lake Formation permissions to the IAM role that is used to access the Glue database" + default = true +} + +variable "lakeformation_permissions" { + type = list(string) + description = "List of permissions granted to the principal. 
Refer to https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html for more details" + default = ["ALL"] +} diff --git a/modules/glue/catalog-database/versions.tf b/modules/glue/catalog-database/versions.tf new file mode 100644 index 000000000..f4c416ee6 --- /dev/null +++ b/modules/glue/catalog-database/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.3.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + utils = { + source = "cloudposse/utils" + version = ">= 1.15.0" + } + } +} diff --git a/modules/glue/catalog-table/README.md b/modules/glue/catalog-table/README.md new file mode 100644 index 000000000..8c7407714 --- /dev/null +++ b/modules/glue/catalog-table/README.md @@ -0,0 +1,123 @@ +--- +tags: + - component/glue/catalog-table + - layer/unassigned + - provider/aws +--- + +# Component: `glue/catalog-table` + +This component provisions Glue catalog tables. + +## Usage + +**Stack Level**: Regional + +```yaml +components: + terraform: + glue/catalog-table/example: + metadata: + component: glue/catalog-table + vars: + enabled: true + name: example + catalog_table_description: Glue catalog table example + glue_iam_component_name: glue/iam + glue_catalog_database_component_name: glue/catalog-database/example + lakeformation_permissions_enabled: true + lakeformation_permissions: + - "ALL" + storage_descriptor: + location: "s3://awsglue-datasets/examples/medicare/Medicare_Hospital_Provider.csv" +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [utils](#requirement\_utils) | >= 1.15.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [glue\_catalog\_database](#module\_glue\_catalog\_database) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [glue\_catalog\_table](#module\_glue\_catalog\_table) | cloudposse/glue/aws//modules/glue-catalog-table | 0.4.0 | +| [glue\_iam\_role](#module\_glue\_iam\_role) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_lakeformation_permissions.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lakeformation_permissions) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [catalog\_id](#input\_catalog\_id) | ID of the Glue Catalog and database to create the table in. If omitted, this defaults to the AWS Account ID plus the database name | `string` | `null` | no | +| [catalog\_table\_description](#input\_catalog\_table\_description) | Description of the table | `string` | `null` | no | +| [catalog\_table\_name](#input\_catalog\_table\_name) | Name of the table | `string` | `null` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [glue\_catalog\_database\_component\_name](#input\_glue\_catalog\_database\_component\_name) | Glue catalog database component name where the table metadata resides. Used to get the Glue catalog database from the remote state | `string` | n/a | yes | +| [glue\_iam\_component\_name](#input\_glue\_iam\_component\_name) | Glue IAM component name. Used to get the Glue IAM role from the remote state | `string` | `"glue/iam"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [lakeformation\_permissions](#input\_lakeformation\_permissions) | List of permissions granted to the principal. Refer to https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html for more details | `list(string)` |
[
"ALL"
]
| no | +| [lakeformation\_permissions\_enabled](#input\_lakeformation\_permissions\_enabled) | Whether to enable adding Lake Formation permissions to the IAM role that is used to access the Glue table | `bool` | `true` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [owner](#input\_owner) | Owner of the table | `string` | `null` | no | +| [parameters](#input\_parameters) | Properties associated with this table, as a map of key-value pairs | `map(string)` | `null` | no | +| [partition\_index](#input\_partition\_index) | Configuration block for a maximum of 3 partition indexes |
object({
index_name = string
keys = list(string)
})
| `null` | no | +| [partition\_keys](#input\_partition\_keys) | Configuration block of columns by which the table is partitioned. Only primitive types are supported as partition keys | `map(string)` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [retention](#input\_retention) | Retention time for the table | `number` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [storage\_descriptor](#input\_storage\_descriptor) | Configuration block for information about the physical storage of this table | `any` | `null` | no | +| [table\_type](#input\_table\_type) | Type of this table (`EXTERNAL_TABLE`, `VIRTUAL_VIEW`, etc.). While optional, some Athena DDL queries such as `ALTER TABLE` and `SHOW CREATE TABLE` will fail if this argument is empty | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [target\_table](#input\_target\_table) | Configuration block of a target table for resource linking |
object({
catalog_id = string
database_name = string
name = string
})
| `null` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [view\_expanded\_text](#input\_view\_expanded\_text) | If the table is a view, the expanded text of the view; otherwise null | `string` | `null` | no | +| [view\_original\_text](#input\_view\_original\_text) | If the table is a view, the original text of the view; otherwise null | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [catalog\_table\_arn](#output\_catalog\_table\_arn) | Catalog table ARN | +| [catalog\_table\_id](#output\_catalog\_table\_id) | Catalog table ID | +| [catalog\_table\_name](#output\_catalog\_table\_name) | Catalog table name | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/glue/catalog-table) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/glue/catalog-table/context.tf b/modules/glue/catalog-table/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/glue/catalog-table/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/glue/catalog-table/main.tf b/modules/glue/catalog-table/main.tf new file mode 100644 index 000000000..8f6272159 --- /dev/null +++ b/modules/glue/catalog-table/main.tf @@ -0,0 +1,42 @@ +locals { + database_name = module.glue_catalog_database.outputs.catalog_database_name +} + +module "glue_catalog_table" { + source = "cloudposse/glue/aws//modules/glue-catalog-table" + version = "0.4.0" + + catalog_table_name = var.catalog_table_name + catalog_table_description = var.catalog_table_description + catalog_id = var.catalog_id + database_name = local.database_name + owner = var.owner + parameters = var.parameters + partition_index = var.partition_index + partition_keys = var.partition_keys + retention = var.retention + table_type = var.table_type + target_table = var.target_table + view_expanded_text = var.view_expanded_text + view_original_text = var.view_original_text + storage_descriptor = var.storage_descriptor + + context = module.this.context +} + +# Grant Lake Formation permissions to the Glue IAM role that is used to access the Glue table. +# This prevents the error: +# Error: error creating Glue crawler: InvalidInputException: Insufficient Lake Formation permission(s) on > (Service: AmazonDataCatalog; Status Code: 400; Error Code: AccessDeniedException +# https://aws.amazon.com/premiumsupport/knowledge-center/glue-insufficient-lakeformation-permissions +# https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lakeformation_permissions +resource "aws_lakeformation_permissions" "default" { + count = module.this.enabled && var.lakeformation_permissions_enabled ? 
1 : 0 + + principal = module.glue_iam_role.outputs.role_arn + permissions = var.lakeformation_permissions + + table { + database_name = local.database_name + name = module.glue_catalog_table.name + } +} diff --git a/modules/glue/catalog-table/outputs.tf b/modules/glue/catalog-table/outputs.tf new file mode 100644 index 000000000..99a122b02 --- /dev/null +++ b/modules/glue/catalog-table/outputs.tf @@ -0,0 +1,14 @@ +output "catalog_table_id" { + description = "Catalog table ID" + value = module.glue_catalog_table.id +} + +output "catalog_table_name" { + description = "Catalog table name" + value = module.glue_catalog_table.name +} + +output "catalog_table_arn" { + description = "Catalog table ARN" + value = module.glue_catalog_table.arn +} diff --git a/modules/glue/catalog-table/providers.tf b/modules/glue/catalog-table/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/glue/catalog-table/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/glue/catalog-table/remote-state.tf b/modules/glue/catalog-table/remote-state.tf new file mode 100644 index 000000000..db6ac3ccf --- /dev/null +++ b/modules/glue/catalog-table/remote-state.tf @@ -0,0 +1,17 @@ +module "glue_iam_role" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.glue_iam_component_name + + context = module.this.context +} + +module "glue_catalog_database" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.glue_catalog_database_component_name + + context = module.this.context +} diff --git a/modules/glue/catalog-table/variables.tf b/modules/glue/catalog-table/variables.tf new file mode 100644 index 000000000..ceff37fbb --- /dev/null +++ b/modules/glue/catalog-table/variables.tf @@ -0,0 +1,186 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "catalog_table_name" { + type = string + description = "Name of the table" + default = null +} + +variable "catalog_table_description" { + type = string + description = "Description of the table" + default = null +} + +variable "catalog_id" { + type = string + description = "ID of the Glue Catalog and database to create the table in. 
If omitted, this defaults to the AWS Account ID plus the database name" + default = null +} + +variable "owner" { + type = string + description = "Owner of the table" + default = null +} + +variable "parameters" { + type = map(string) + description = "Properties associated with this table, as a map of key-value pairs" + default = null +} + +variable "partition_index" { + type = object({ + index_name = string + keys = list(string) + }) + description = "Configuration block for a maximum of 3 partition indexes" + default = null +} + +variable "partition_keys" { + # type = object({ + # comment = string + # name = string + # type = string + # }) + # Using `type = map(string)` since some of the the fields are optional and we don't want to force the caller to specify all of them and set to `null` those not used + type = map(string) + description = "Configuration block of columns by which the table is partitioned. Only primitive types are supported as partition keys" + default = null +} + +variable "retention" { + type = number + description = "Retention time for the table" + default = null +} + +variable "table_type" { + type = string + description = "Type of this table (`EXTERNAL_TABLE`, `VIRTUAL_VIEW`, etc.). While optional, some Athena DDL queries such as `ALTER TABLE` and `SHOW CREATE TABLE` will fail if this argument is empty" + default = null +} + +variable "target_table" { + type = object({ + catalog_id = string + database_name = string + name = string + }) + description = "Configuration block of a target table for resource linking" + default = null +} + +variable "view_expanded_text" { + type = string + description = "If the table is a view, the expanded text of the view; otherwise null" + default = null +} + +variable "view_original_text" { + type = string + description = "If the table is a view, the original text of the view; otherwise null" + default = null +} + +variable "storage_descriptor" { + # type = object({ + # # List of reducer grouping columns, clustering columns, and bucketing columns in the table + # bucket_columns = list(string) + # # Configuration block for columns in the table + # columns = list(object({ + # comment = string + # name = string + # parameters = map(string) + # type = string + # })) + # # Whether the data in the table is compressed + # compressed = bool + # # Input format: SequenceFileInputFormat (binary), or TextInputFormat, or a custom format + # input_format = string + # # Physical location of the table. By default this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name + # location = string + # # Must be specified if the table contains any dimension columns + # number_of_buckets = number + # # Output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat, or a custom format + # output_format = string + # # User-supplied properties in key-value form + # parameters = map(string) + # # Object that references a schema stored in the AWS Glue Schema Registry + # # When creating a table, you can pass an empty list of columns for the schema, and instead use a schema reference + # schema_reference = object({ + # # Configuration block that contains schema identity fields. Either this or the schema_version_id has to be provided + # schema_id = object({ + # # Name of the schema registry that contains the schema. Must be provided when schema_name is specified and conflicts with schema_arn + # registry_name = string + # # ARN of the schema. 
One of schema_arn or schema_name has to be provided + # schema_arn = string + # # Name of the schema. One of schema_arn or schema_name has to be provided + # schema_name = string + # }) + # # Unique ID assigned to a version of the schema. Either this or the schema_id has to be provided + # schema_version_id = string + # schema_version_number = number + # }) + # # Configuration block for serialization and deserialization ("SerDe") information + # ser_de_info = object({ + # # Name of the SerDe + # name = string + # # Map of initialization parameters for the SerDe, in key-value form + # parameters = map(string) + # # Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + # serialization_library = string + # }) + # # Configuration block with information about values that appear very frequently in a column (skewed values) + # skewed_info = object({ + # # List of names of columns that contain skewed values + # skewed_column_names = list(string) + # # List of values that appear so frequently as to be considered skewed + # skewed_column_value_location_maps = list(string) + # # Map of skewed values to the columns that contain them + # skewed_column_values = map(string) + # }) + # # Configuration block for the sort order of each bucket in the table + # sort_columns = object({ + # # Name of the column + # column = string + # # Whether the column is sorted in ascending (1) or descending order (0) + # sort_order = number + # }) + # # Whether the table data is stored in subdirectories + # stored_as_sub_directories = bool + # }) + + # Using `type = any` since some of the the fields are optional and we don't want to force the caller to specify all of them and set to `null` those not used + type = any + description = "Configuration block for information about the physical storage of this table" + default = null +} + +variable "glue_iam_component_name" { + type = string + description = "Glue IAM component name. Used to get the Glue IAM role from the remote state" + default = "glue/iam" +} + +variable "glue_catalog_database_component_name" { + type = string + description = "Glue catalog database component name where the table metadata resides. Used to get the Glue catalog database from the remote state" +} + +variable "lakeformation_permissions_enabled" { + type = bool + description = "Whether to enable adding Lake Formation permissions to the IAM role that is used to access the Glue table" + default = true +} + +variable "lakeformation_permissions" { + type = list(string) + description = "List of permissions granted to the principal. Refer to https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html for more details" + default = ["ALL"] +} diff --git a/modules/glue/catalog-table/versions.tf b/modules/glue/catalog-table/versions.tf new file mode 100644 index 000000000..f4c416ee6 --- /dev/null +++ b/modules/glue/catalog-table/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.3.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + utils = { + source = "cloudposse/utils" + version = ">= 1.15.0" + } + } +} diff --git a/modules/glue/connection/README.md b/modules/glue/connection/README.md new file mode 100644 index 000000000..d6015a422 --- /dev/null +++ b/modules/glue/connection/README.md @@ -0,0 +1,132 @@ +--- +tags: + - component/glue/connection + - layer/unassigned + - provider/aws +--- + +# Component: `glue/connection` + +This component provisions Glue connections. 
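+
+For a JDBC connection, the component reads the database endpoint and credentials from the SSM parameters referenced by `ssm_path_endpoint`, `ssm_path_username`, and `ssm_path_password`, and assembles the connection URL as `jdbc:<db_type>://<endpoint>/<connection_db_name>`. Those parameters must already exist before this component is applied. The following is only a minimal illustrative sketch of seeding them with plain Terraform; the paths and values are placeholders and not part of this component:
+
+```hcl
+# Hypothetical example: pre-create the SSM parameters that glue/connection reads.
+# The paths match the Usage example below; the values are placeholders.
+resource "aws_ssm_parameter" "glue_redshift_endpoint" {
+  name  = "/glue/redshift/endpoint"
+  type  = "String"
+  value = "example-cluster.abc123.us-east-1.redshift.amazonaws.com:5439"
+}
+
+resource "aws_ssm_parameter" "glue_redshift_admin_user" {
+  name  = "/glue/redshift/admin_user"
+  type  = "String"
+  value = "admin"
+}
+
+resource "aws_ssm_parameter" "glue_redshift_admin_password" {
+  name  = "/glue/redshift/admin_password"
+  type  = "SecureString"
+  value = "CHANGE_ME" # placeholder; source this from a secrets manager in practice
+}
+```
+
+The component's `data "aws_ssm_parameter"` lookups then resolve these values at apply time and inject them into the Glue connection properties.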
+ +## Usage + +**Stack Level**: Regional + +```yaml +components: + terraform: + glue/connection/example/redshift: + metadata: + component: glue/connection + vars: + connection_name: "jdbc-redshift" + connection_description: "Glue Connection for Redshift" + connection_type: "JDBC" + db_type: "redshift" + connection_db_name: "analytics" + ssm_path_username: "/glue/redshift/admin_user" + ssm_path_password: "/glue/redshift/admin_password" + ssm_path_endpoint: "/glue/redshift/endpoint" + physical_connection_enabled: true + vpc_component_name: "vpc" +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [utils](#requirement\_utils) | >= 1.15.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [glue\_connection](#module\_glue\_connection) | cloudposse/glue/aws//modules/glue-connection | 0.4.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [security\_group](#module\_security\_group) | cloudposse/security-group/aws | 2.2.0 | +| [target\_security\_group](#module\_target\_security\_group) | cloudposse/security-group/aws | 2.2.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_ssm_parameter.endpoint](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.user](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_subnet.selected](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnet) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [catalog\_id](#input\_catalog\_id) | The ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default | `string` | `null` | no | +| [connection\_db\_name](#input\_connection\_db\_name) | Database name that the Glue connector will reference | `string` | `null` | no | +| [connection\_description](#input\_connection\_description) | Connection description | `string` | `null` | no | +| [connection\_name](#input\_connection\_name) | Connection name. If not provided, the name will be generated from the context | `string` | `null` | no | +| [connection\_properties](#input\_connection\_properties) | A map of key-value pairs used as parameters for this connection | `map(string)` | `null` | no | +| [connection\_type](#input\_connection\_type) | The type of the connection. Supported are: JDBC, MONGODB, KAFKA, and NETWORK. Defaults to JDBC | `string` | n/a | yes | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [db\_type](#input\_db\_type) | Database type for the connection URL: `postgres` or `redshift` | `string` | `"redshift"` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.<br>
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [match\_criteria](#input\_match\_criteria) | A list of criteria that can be used in selecting this connection | `list(string)` | `null` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [physical\_connection\_enabled](#input\_physical\_connection\_enabled) | Flag to enable/disable physical connection | `bool` | `false` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [security\_group\_allow\_all\_egress](#input\_security\_group\_allow\_all\_egress) | A convenience that adds to the rules a rule that allows all egress.
If this is false and no egress rules are specified via `rules` or `rule-matrix`, then no egress will be allowed. | `bool` | `true` | no | +| [security\_group\_create\_before\_destroy](#input\_security\_group\_create\_before\_destroy) | Set `true` to enable terraform `create_before_destroy` behavior on the created security group.
We only recommend setting this `false` if you are importing an existing security group
that you do not want replaced and therefore need full control over its name.
Note that changing this value will always cause the security group to be replaced. | `bool` | `true` | no | +| [security\_group\_ingress\_cidr\_blocks](#input\_security\_group\_ingress\_cidr\_blocks) | A list of CIDR blocks for the cluster Security Group to allow ingress to the cluster security group | `list(string)` | `[]` | no | +| [security\_group\_ingress\_from\_port](#input\_security\_group\_ingress\_from\_port) | Start port on which the Glue connection accepts incoming connections | `number` | `0` | no | +| [security\_group\_ingress\_to\_port](#input\_security\_group\_ingress\_to\_port) | End port on which the Glue connection accepts incoming connections | `number` | `0` | no | +| [ssm\_path\_endpoint](#input\_ssm\_path\_endpoint) | Database endpoint SSM path | `string` | `null` | no | +| [ssm\_path\_password](#input\_ssm\_path\_password) | Database password SSM path | `string` | `null` | no | +| [ssm\_path\_username](#input\_ssm\_path\_username) | Database username SSM path | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).<br>
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [target\_security\_group\_rules](#input\_target\_security\_group\_rules) | Additional Security Group rules that allow Glue to communicate with the target database | `list(any)` | `[]` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [vpc\_component\_name](#input\_vpc\_component\_name) | VPC component name | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [connection\_arn](#output\_connection\_arn) | Glue connection ARN | +| [connection\_id](#output\_connection\_id) | Glue connection ID | +| [connection\_name](#output\_connection\_name) | Glue connection name | +| [security\_group\_arn](#output\_security\_group\_arn) | The ARN of the Security Group associated with the Glue connection | +| [security\_group\_id](#output\_security\_group\_id) | The ID of the Security Group associated with the Glue connection | +| [security\_group\_name](#output\_security\_group\_name) | The name of the Security Group and associated with the Glue connection | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/glue/connection) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/glue/connection/context.tf b/modules/glue/connection/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/glue/connection/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/glue/connection/main.tf b/modules/glue/connection/main.tf new file mode 100644 index 000000000..8fc89c110 --- /dev/null +++ b/modules/glue/connection/main.tf @@ -0,0 +1,48 @@ +locals { + enabled = module.this.enabled + + physical_connection_enabled = local.enabled && var.physical_connection_enabled + + subnet_id = local.physical_connection_enabled ? module.vpc.outputs.private_subnet_ids[0] : null + + availability_zone = local.physical_connection_enabled ? data.aws_subnet.selected[0].availability_zone : null + + physical_connection_requirements = local.physical_connection_enabled ? { + # List of security group IDs used by the connection + security_group_id_list = [module.security_group.id] + # The availability zone of the connection. This field is redundant and implied by subnet_id, but is currently an API requirement + availability_zone = local.availability_zone + # The subnet ID used by the connection + subnet_id = local.subnet_id + } : null + + username = one(data.aws_ssm_parameter.user.*.value) + password = one(data.aws_ssm_parameter.password.*.value) + endpoint = one(data.aws_ssm_parameter.endpoint.*.value) +} + +data "aws_subnet" "selected" { + count = local.physical_connection_enabled ? 
1 : 0 + + id = local.subnet_id +} + +module "glue_connection" { + source = "cloudposse/glue/aws//modules/glue-connection" + version = "0.4.0" + + connection_name = var.connection_name + connection_description = var.connection_description + catalog_id = var.catalog_id + connection_type = var.connection_type + match_criteria = var.match_criteria + physical_connection_requirements = local.physical_connection_requirements + + connection_properties = { + JDBC_CONNECTION_URL = "jdbc:${var.db_type}://${local.endpoint}/${var.connection_db_name}" + USERNAME = local.username + PASSWORD = local.password + } + + context = module.this.context +} diff --git a/modules/glue/connection/outputs.tf b/modules/glue/connection/outputs.tf new file mode 100644 index 000000000..7f2af22c3 --- /dev/null +++ b/modules/glue/connection/outputs.tf @@ -0,0 +1,29 @@ +output "connection_id" { + description = "Glue connection ID" + value = module.glue_connection.id +} + +output "connection_name" { + description = "Glue connection name" + value = module.glue_connection.name +} + +output "connection_arn" { + description = "Glue connection ARN" + value = module.glue_connection.arn +} + +output "security_group_id" { + description = "The ID of the Security Group associated with the Glue connection" + value = module.security_group.id +} + +output "security_group_arn" { + description = "The ARN of the Security Group associated with the Glue connection" + value = module.security_group.arn +} + +output "security_group_name" { + description = "The name of the Security Group and associated with the Glue connection" + value = module.security_group.name +} diff --git a/modules/glue/connection/providers.tf b/modules/glue/connection/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/glue/connection/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/glue/connection/remote-state.tf b/modules/glue/connection/remote-state.tf new file mode 100644 index 000000000..0238c5950 --- /dev/null +++ b/modules/glue/connection/remote-state.tf @@ -0,0 +1,15 @@ +module "vpc" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.vpc_component_name + + bypass = !local.physical_connection_enabled + + defaults = { + private_subnet_ids = [] + vpc_id = null + } + + context = module.this.context +} diff --git a/modules/glue/connection/sg.tf b/modules/glue/connection/sg.tf new file mode 100644 index 000000000..1bce26c0c --- /dev/null +++ b/modules/glue/connection/sg.tf @@ -0,0 +1,42 @@ +locals { + ingress_cidr_blocks_enabled = local.physical_connection_enabled && var.security_group_ingress_cidr_blocks != null && length(var.security_group_ingress_cidr_blocks) > 0 + + rules = local.ingress_cidr_blocks_enabled ? 
[ + { + type = "ingress" + from_port = var.security_group_ingress_from_port + to_port = var.security_group_ingress_to_port + protocol = "all" + cidr_blocks = var.security_group_ingress_cidr_blocks + } + ] : [] +} + +module "security_group" { + source = "cloudposse/security-group/aws" + version = "2.2.0" + + enabled = local.physical_connection_enabled + + vpc_id = module.vpc.outputs.vpc_id + create_before_destroy = var.security_group_create_before_destroy + allow_all_egress = var.security_group_allow_all_egress + rules = local.rules + + context = module.this.context +} + +# This allows adding the necessary Security Group rules for Glue to communicate with Redshift +module "target_security_group" { + source = "cloudposse/security-group/aws" + version = "2.2.0" + + enabled = local.enabled && var.target_security_group_rules != null && length(var.target_security_group_rules) > 0 + + vpc_id = module.vpc.outputs.vpc_id + security_group_name = [module.security_group.name] + target_security_group_id = [module.security_group.id] + rules = var.target_security_group_rules + + context = module.this.context +} diff --git a/modules/glue/connection/ssm.tf b/modules/glue/connection/ssm.tf new file mode 100644 index 000000000..7397013b9 --- /dev/null +++ b/modules/glue/connection/ssm.tf @@ -0,0 +1,17 @@ +data "aws_ssm_parameter" "endpoint" { + count = local.enabled && var.ssm_path_endpoint != null ? 1 : 0 + + name = var.ssm_path_endpoint +} + +data "aws_ssm_parameter" "user" { + count = local.enabled && var.ssm_path_username != null ? 1 : 0 + + name = var.ssm_path_username +} + +data "aws_ssm_parameter" "password" { + count = local.enabled && var.ssm_path_password != null ? 1 : 0 + + name = var.ssm_path_password +} diff --git a/modules/glue/connection/variables.tf b/modules/glue/connection/variables.tf new file mode 100644 index 000000000..6a3c44cff --- /dev/null +++ b/modules/glue/connection/variables.tf @@ -0,0 +1,129 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "vpc_component_name" { + type = string + description = "VPC component name" +} + +variable "connection_name" { + type = string + description = "Connection name. If not provided, the name will be generated from the context" + default = null +} + +variable "connection_description" { + type = string + description = "Connection description" + default = null +} + +variable "catalog_id" { + type = string + description = "The ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default" + default = null +} + +variable "connection_type" { + type = string + description = "The type of the connection. Supported are: JDBC, MONGODB, KAFKA, and NETWORK. Defaults to JDBC" + + validation { + condition = contains(["JDBC", "MONGODB", "KAFKA", "NETWORK"], var.connection_type) + error_message = "Supported are: JDBC, MONGODB, KAFKA, and NETWORK" + } +} + +variable "connection_properties" { + type = map(string) + description = "A map of key-value pairs used as parameters for this connection" + default = null +} + +variable "match_criteria" { + type = list(string) + description = "A list of criteria that can be used in selecting this connection" + default = null +} + +variable "security_group_create_before_destroy" { + type = bool + description = <<-EOT + Set `true` to enable terraform `create_before_destroy` behavior on the created security group. 
+ We only recommend setting this `false` if you are importing an existing security group + that you do not want replaced and therefore need full control over its name. + Note that changing this value will always cause the security group to be replaced. + EOT + default = true +} + +variable "security_group_allow_all_egress" { + type = bool + default = true + description = <<-EOT + A convenience that adds to the rules a rule that allows all egress. + If this is false and no egress rules are specified via `rules` or `rule-matrix`, then no egress will be allowed. + EOT +} + +variable "security_group_ingress_cidr_blocks" { + type = list(string) + default = [] + description = "A list of CIDR blocks for the the cluster Security Group to allow ingress to the cluster security group" +} + +variable "security_group_ingress_from_port" { + type = number + default = 0 + description = "Start port on which the Glue connection accepts incoming connections" +} + +variable "security_group_ingress_to_port" { + type = number + default = 0 + description = "End port on which the Glue connection accepts incoming connections" +} + +variable "physical_connection_enabled" { + type = bool + description = "Flag to enable/disable physical connection" + default = false +} + +variable "connection_db_name" { + type = string + description = "Database name that the Glue connector will reference" + default = null +} + +variable "ssm_path_username" { + type = string + description = "Database username SSM path" + default = null +} + +variable "ssm_path_password" { + type = string + description = "Database password SSM path" + default = null +} + +variable "ssm_path_endpoint" { + type = string + description = "Database endpoint SSM path" + default = null +} + +variable "target_security_group_rules" { + type = list(any) + description = "Additional Security Group rules that allow Glue to communicate with the target database" + default = [] +} + +variable "db_type" { + type = string + description = "Database type for the connection URL: `postgres` or `redshift`" + default = "redshift" +} diff --git a/modules/glue/connection/versions.tf b/modules/glue/connection/versions.tf new file mode 100644 index 000000000..f4c416ee6 --- /dev/null +++ b/modules/glue/connection/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.3.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + utils = { + source = "cloudposse/utils" + version = ">= 1.15.0" + } + } +} diff --git a/modules/glue/crawler/README.md b/modules/glue/crawler/README.md new file mode 100644 index 000000000..a06fd003c --- /dev/null +++ b/modules/glue/crawler/README.md @@ -0,0 +1,125 @@ +--- +tags: + - component/glue/crawler + - layer/unassigned + - provider/aws +--- + +# Component: `glue/crawler` + +This component provisions Glue crawlers. + +## Usage + +**Stack Level**: Regional + +```yaml +components: + terraform: + # The crawler crawls the data in an S3 bucket and puts the results into a table in the Glue Catalog. + # The crawler will read the first 2 MB of data from the file, and recognize the schema. + # After that, the crawler will sync the table. + glue/crawler/example: + metadata: + component: glue/crawler + vars: + enabled: true + name: example + crawler_description: "Glue crawler example" + glue_iam_component_name: "glue/iam" + glue_catalog_database_component_name: "glue/catalog-database/example" + glue_catalog_table_component_name: "glue/catalog-table/example" + schedule: "cron(0 1 * * ? 
*)" + schema_change_policy: + delete_behavior: LOG + update_behavior: null +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [utils](#requirement\_utils) | >= 1.15.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [glue\_catalog\_database](#module\_glue\_catalog\_database) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [glue\_catalog\_table](#module\_glue\_catalog\_table) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [glue\_crawler](#module\_glue\_crawler) | cloudposse/glue/aws//modules/glue-crawler | 0.4.0 | +| [glue\_iam\_role](#module\_glue\_iam\_role) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [catalog\_target](#input\_catalog\_target) | List of nested Glue catalog target arguments |
list(object({
database_name = string
tables = list(string)
}))
| `null` | no | +| [classifiers](#input\_classifiers) | List of custom classifiers. By default, all AWS classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification | `list(string)` | `null` | no | +| [configuration](#input\_configuration) | JSON string of configuration information | `string` | `null` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [crawler\_description](#input\_crawler\_description) | Glue crawler description | `string` | `null` | no | +| [crawler\_name](#input\_crawler\_name) | Glue crawler name. If not provided, the name will be generated from the context | `string` | `null` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [delta\_target](#input\_delta\_target) | List of nested Delta target arguments |
list(object({
connection_name = string
delta_tables = list(string)
write_manifest = bool
}))
| `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [dynamodb\_target](#input\_dynamodb\_target) | List of nested DynamoDB target arguments | `list(any)` | `null` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [glue\_catalog\_database\_component\_name](#input\_glue\_catalog\_database\_component\_name) | Glue catalog database component name where metadata resides. Used to get the Glue catalog database from the remote state | `string` | n/a | yes | +| [glue\_catalog\_table\_component\_name](#input\_glue\_catalog\_table\_component\_name) | Glue catalog table component name where metadata resides. Used to get the Glue catalog table from the remote state | `string` | `null` | no | +| [glue\_iam\_component\_name](#input\_glue\_iam\_component\_name) | Glue IAM component name. Used to get the Glue IAM role from the remote state | `string` | `"glue/iam"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.<br>
Does not affect `id_full`. | `number` | `null` | no | +| [jdbc\_target](#input\_jdbc\_target) | List of nested JDBC target arguments | `list(any)` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.<br>
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [lineage\_configuration](#input\_lineage\_configuration) | Specifies data lineage configuration settings for the crawler |
object({
crawler_lineage_settings = string
})
| `null` | no | +| [mongodb\_target](#input\_mongodb\_target) | List of nested MongoDB target arguments | `list(any)` | `null` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [recrawl\_policy](#input\_recrawl\_policy) | A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run |
object({
recrawl_behavior = string
})
| `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [s3\_target](#input\_s3\_target) | List of nested Amazon S3 target arguments | `list(any)` | `null` | no | +| [schedule](#input\_schedule) | A cron expression for the schedule | `string` | `null` | no | +| [schema\_change\_policy](#input\_schema\_change\_policy) | Policy for the crawler's update and deletion behavior | `map(string)` | `null` | no | +| [security\_configuration](#input\_security\_configuration) | The name of Security Configuration to be used by the crawler | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [table\_prefix](#input\_table\_prefix) | The table prefix used for catalog tables that are created | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [crawler\_arn](#output\_crawler\_arn) | Crawler ARN | +| [crawler\_id](#output\_crawler\_id) | Crawler ID | +| [crawler\_name](#output\_crawler\_name) | Crawler name | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/glue/crawler) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/glue/crawler/context.tf b/modules/glue/crawler/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/glue/crawler/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. 
+ See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. 
+ EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/glue/crawler/main.tf b/modules/glue/crawler/main.tf new file mode 100644 index 000000000..c4763f6dd --- /dev/null +++ b/modules/glue/crawler/main.tf @@ -0,0 +1,38 @@ +locals { + database_name = module.glue_catalog_database.outputs.catalog_database_name + table_name = module.glue_catalog_table.outputs.catalog_table_name + iam_role_arn = module.glue_iam_role.outputs.role_arn + + catalog_target = var.catalog_target != null ? var.catalog_target : [ + { + database_name = local.database_name + tables = [local.table_name] + } + ] +} + +module "glue_crawler" { + source = "cloudposse/glue/aws//modules/glue-crawler" + version = "0.4.0" + + crawler_name = var.crawler_name + crawler_description = var.crawler_description + database_name = local.database_name + role = local.iam_role_arn + schedule = var.schedule + classifiers = var.classifiers + configuration = var.configuration + jdbc_target = var.jdbc_target + dynamodb_target = var.dynamodb_target + s3_target = var.s3_target + mongodb_target = var.mongodb_target + catalog_target = local.catalog_target + delta_target = var.delta_target + table_prefix = var.table_prefix + security_configuration = var.security_configuration + schema_change_policy = var.schema_change_policy + lineage_configuration = var.lineage_configuration + recrawl_policy = var.recrawl_policy + + context = module.this.context +} diff --git a/modules/glue/crawler/outputs.tf b/modules/glue/crawler/outputs.tf new file mode 100644 index 000000000..36ec7dcc6 --- /dev/null +++ b/modules/glue/crawler/outputs.tf @@ -0,0 +1,14 @@ +output "crawler_id" { + description = "Crawler ID" + value = module.glue_crawler.id +} + +output "crawler_name" { + description = "Crawler name" + value = module.glue_crawler.name +} + +output "crawler_arn" { + description = "Crawler ARN" + value = module.glue_crawler.arn +} diff --git a/modules/glue/crawler/providers.tf b/modules/glue/crawler/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/glue/crawler/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/glue/crawler/remote-state.tf b/modules/glue/crawler/remote-state.tf new file mode 100644 index 000000000..1d90fec73 --- /dev/null +++ b/modules/glue/crawler/remote-state.tf @@ -0,0 +1,34 @@ +module "glue_iam_role" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.glue_iam_component_name + + context = module.this.context +} + +module "glue_catalog_database" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.glue_catalog_database_component_name + + context = module.this.context +} + +module "glue_catalog_table" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.glue_catalog_table_component_name + + bypass = var.glue_catalog_table_component_name == null + + defaults = { + catalog_table_id = null + catalog_table_name = null + catalog_table_arn = null + } + + context = module.this.context +} diff --git a/modules/glue/crawler/variables.tf b/modules/glue/crawler/variables.tf new file mode 100644 index 000000000..75988d184 --- /dev/null +++ b/modules/glue/crawler/variables.tf @@ -0,0 +1,165 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "crawler_name" { + type = string + description = "Glue crawler name. If not provided, the name will be generated from the context" + default = null +} + +variable "crawler_description" { + type = string + description = "Glue crawler description" + default = null +} + +variable "schedule" { + type = string + description = "A cron expression for the schedule" + default = null +} + +variable "classifiers" { + type = list(string) + description = "List of custom classifiers. 
By default, all AWS classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification" + default = null +} + +variable "configuration" { + type = string + description = "JSON string of configuration information" + default = null +} + +variable "jdbc_target" { + # type = list(object({ + # connection_name = string + # path = string + # exclusions = list(string) + # })) + + # Using `type = list(any)` since some of the fields are optional and we don't want to force the caller to specify all of them and set to `null` those not used + type = list(any) + description = "List of nested JDBC target arguments" + default = null +} + +variable "dynamodb_target" { + # type = list(object({ + # path = string + # scan_all = bool + # scan_rate = number + # })) + + # Using `type = list(any)` since some of the fields are optional and we don't want to force the caller to specify all of them and set to `null` those not used + type = list(any) + description = "List of nested DynamoDB target arguments" + default = null +} + +variable "s3_target" { + # type = list(object({ + # path = string + # connection_name = string + # exclusions = list(string) + # sample_size = number + # event_queue_arn = string + # dlq_event_queue_arn = string + # })) + + # Using `type = list(any)` since some of the fields are optional and we don't want to force the caller to specify all of them and set to `null` those not used + type = list(any) + description = "List of nested Amazon S3 target arguments" + default = null +} + +variable "mongodb_target" { + # type = list(object({ + # connection_name = string + # path = string + # scan_all = bool + # })) + + # Using `type = list(any)` since some of the fields are optional and we don't want to force the caller to specify all of them and set to `null` those not used + type = list(any) + description = "List of nested MongoDB target arguments" + default = null +} + +variable "catalog_target" { + type = list(object({ + database_name = string + tables = list(string) + })) + description = "List of nested Glue catalog target arguments" + default = null +} + +variable "delta_target" { + type = list(object({ + connection_name = string + delta_tables = list(string) + write_manifest = bool + })) + description = "List of nested Delta target arguments" + default = null +} + +variable "table_prefix" { + type = string + description = "The table prefix used for catalog tables that are created" + default = null +} + +variable "security_configuration" { + type = string + description = "The name of Security Configuration to be used by the crawler" + default = null +} + +variable "schema_change_policy" { + # type = object({ + # delete_behavior = string + # update_behavior = string + # }) + + # Using `type = map(string)` since some of the fields are optional and we don't want to force the caller to specify all of them and set to `null` those not used + type = map(string) + description = "Policy for the crawler's update and deletion behavior" + default = null +} + +variable "lineage_configuration" { + type = object({ + crawler_lineage_settings = string + }) + description = "Specifies data lineage configuration settings for the crawler" + default = null +} + +variable "recrawl_policy" { + type = object({ + recrawl_behavior = string + }) + description = "A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run" + default = null +} + +variable 
"glue_iam_component_name" { + type = string + description = "Glue IAM component name. Used to get the Glue IAM role from the remote state" + default = "glue/iam" +} + +variable "glue_catalog_database_component_name" { + type = string + description = "Glue catalog database component name where metadata resides. Used to get the Glue catalog database from the remote state" +} + +variable "glue_catalog_table_component_name" { + type = string + description = "Glue catalog table component name where metadata resides. Used to get the Glue catalog table from the remote state" + default = null +} diff --git a/modules/glue/crawler/versions.tf b/modules/glue/crawler/versions.tf new file mode 100644 index 000000000..f4c416ee6 --- /dev/null +++ b/modules/glue/crawler/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.3.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + utils = { + source = "cloudposse/utils" + version = ">= 1.15.0" + } + } +} diff --git a/modules/glue/iam/README.md b/modules/glue/iam/README.md new file mode 100644 index 000000000..ce4020405 --- /dev/null +++ b/modules/glue/iam/README.md @@ -0,0 +1,99 @@ +--- +tags: + - component/glue/iam + - layer/unassigned + - provider/aws +--- + +# Component: `glue/iam` + +This component provisions IAM roles for AWS Glue. + +## Usage + +**Stack Level**: Regional + +```yaml +components: + terraform: + glue/iam: + metadata: + component: glue/iam + vars: + enabled: true + name: glue + iam_role_description: "Role for AWS Glue with access to EC2, S3, and Cloudwatch Logs" + iam_policy_description: "Policy for AWS Glue with access to EC2, S3, and Cloudwatch Logs" + iam_managed_policy_arns: + - "arn:aws:iam::aws:policy/service-role/AWSGlueServiceRole" +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [utils](#requirement\_utils) | >= 1.15.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [iam\_role](#module\_iam\_role) | cloudposse/iam-role/aws | 0.19.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [iam\_managed\_policy\_arns](#input\_iam\_managed\_policy\_arns) | IAM managed policy ARNs | `list(string)` |
[
"arn:aws:iam::aws:policy/service-role/AWSGlueServiceRole"
]
| no | +| [iam\_policy\_description](#input\_iam\_policy\_description) | Glue IAM policy description | `string` | `"Policy for AWS Glue with access to EC2, S3, and Cloudwatch Logs"` | no | +| [iam\_role\_description](#input\_iam\_role\_description) | Glue IAM role description | `string` | `"Role for AWS Glue with access to EC2, S3, and Cloudwatch Logs"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [role\_arn](#output\_role\_arn) | The ARN of the Glue role | +| [role\_id](#output\_role\_id) | The ID of the Glue role | +| [role\_name](#output\_role\_name) | The name of the Glue role | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/glue/iam) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/glue/iam/context.tf b/modules/glue/iam/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/glue/iam/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. 
+ See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. 
+ EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/glue/iam/main.tf b/modules/glue/iam/main.tf new file mode 100644 index 000000000..246071692 --- /dev/null +++ b/modules/glue/iam/main.tf @@ -0,0 +1,15 @@ +module "iam_role" { + source = "cloudposse/iam-role/aws" + version = "0.19.0" + + principals = { + "Service" = ["glue.amazonaws.com"] + } + + managed_policy_arns = var.iam_managed_policy_arns + role_description = var.iam_role_description + policy_description = var.iam_policy_description + policy_document_count = 0 + + context = module.this.context +} diff --git a/modules/glue/iam/outputs.tf b/modules/glue/iam/outputs.tf new file mode 100644 index 000000000..b48c6deb2 --- /dev/null +++ b/modules/glue/iam/outputs.tf @@ -0,0 +1,14 @@ +output "role_name" { + value = module.iam_role.name + description = "The name of the Glue role" +} + +output "role_id" { + value = module.iam_role.id + description = "The ID of the Glue role" +} + +output "role_arn" { + value = module.iam_role.arn + description = "The ARN of the Glue role" +} diff --git a/modules/glue/iam/providers.tf b/modules/glue/iam/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/glue/iam/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/glue/iam/variables.tf b/modules/glue/iam/variables.tf new file mode 100644 index 000000000..6b423894a --- /dev/null +++ b/modules/glue/iam/variables.tf @@ -0,0 +1,22 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "iam_role_description" { + type = string + description = "Glue IAM role description" + default = "Role for AWS Glue with access to EC2, S3, and Cloudwatch Logs" +} + +variable "iam_policy_description" { + type = string + description = "Glue IAM policy description" + default = "Policy for AWS Glue with access to EC2, S3, and Cloudwatch Logs" +} + +variable "iam_managed_policy_arns" { + type = list(string) + description = "IAM managed policy ARNs" + default = ["arn:aws:iam::aws:policy/service-role/AWSGlueServiceRole"] +} diff --git a/modules/glue/iam/versions.tf b/modules/glue/iam/versions.tf new file mode 100644 index 000000000..f4c416ee6 --- /dev/null +++ b/modules/glue/iam/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.3.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + utils = { + source = "cloudposse/utils" + version = ">= 1.15.0" + } + } +} diff --git a/modules/glue/job/README.md b/modules/glue/job/README.md new file mode 100644 index 000000000..d642f38a0 --- /dev/null +++ b/modules/glue/job/README.md @@ -0,0 +1,132 @@ +--- +tags: + - component/glue/job + - layer/unassigned + - provider/aws +--- + +# Component: `glue/job` + +This component provisions Glue jobs. 
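+
+The job `command` is normally assembled from the `glue_job_s3_bucket_component_name` and
+`glue_job_s3_bucket_script_path` inputs (see the `command` local in `main.tf`), but a complete `command` map can also
+be supplied directly. Below is a minimal sketch of that override; the stack instance name, bucket, and script path are
+hypothetical and only illustrate the shape of the input.
+
+```yaml
+components:
+  terraform:
+    glue/job/custom-script:
+      metadata:
+        component: glue/job
+      vars:
+        enabled: true
+        name: custom-script
+        job_description: Glue job with an explicitly supplied command
+        glue_version: "2.0"
+        worker_type: Standard
+        number_of_workers: 2
+        # Hypothetical bucket and script path -- replace with real values
+        command:
+          name: glueetl
+          script_location: s3://example-glue-scripts/jobs/custom_job.py
+          python_version: 3
+```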
+ +## Usage + +**Stack Level**: Regional + +```yaml +components: + terraform: + glue/job/example: + metadata: + component: glue/job + vars: + enabled: true + name: example + job_description: Glue job example + glue_version: "2.0" + worker_type: Standard + number_of_workers: 2 + max_retries: 2 + timeout: 20 + glue_iam_component_name: "glue/iam" + glue_job_s3_bucket_component_name: "s3/datalake" + glue_job_s3_bucket_script_path: "glue/glue_job.py" + glue_job_command_name: glueetl + glue_job_command_python_version: 3 +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [utils](#requirement\_utils) | >= 1.15.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [glue\_iam\_role](#module\_glue\_iam\_role) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [glue\_job](#module\_glue\_job) | cloudposse/glue/aws//modules/glue-job | 0.4.0 | +| [glue\_job\_s3\_bucket](#module\_glue\_job\_s3\_bucket) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_iam_policy.glue_job_aws_tools_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_role_policy_attachment.glue_jobs_aws_tools_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.glue_redshift_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_policy_document.glue_job_aws_tools_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [command](#input\_command) | The command of the job | `map(any)` | `null` | no | +| [connections](#input\_connections) | The list of connections used for this job | `list(string)` | `null` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [default\_arguments](#input\_default\_arguments) | The map of default arguments for the job. You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes | `map(string)` | `null` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [execution\_property](#input\_execution\_property) | Execution property of the job |
object({
# The maximum number of concurrent runs allowed for the job. The default is 1.
max_concurrent_runs = number
})
| `null` | no | +| [glue\_iam\_component\_name](#input\_glue\_iam\_component\_name) | Glue IAM component name. Used to get the Glue IAM role from the remote state | `string` | `"glue/iam"` | no | +| [glue\_job\_command\_name](#input\_glue\_job\_command\_name) | The name of the job command. Defaults to glueetl. Use pythonshell for Python Shell Job Type, or gluestreaming for Streaming Job Type. max\_capacity needs to be set if pythonshell is chosen | `string` | `"glueetl"` | no | +| [glue\_job\_command\_python\_version](#input\_glue\_job\_command\_python\_version) | The Python version being used to execute a Python shell job. Allowed values are 2, 3 or 3.9. Version 3 refers to Python 3.6 | `number` | `3` | no | +| [glue\_job\_s3\_bucket\_component\_name](#input\_glue\_job\_s3\_bucket\_component\_name) | Glue job S3 bucket component name. Used to get the remote state of the S3 bucket where the Glue job script is located | `string` | `null` | no | +| [glue\_job\_s3\_bucket\_script\_path](#input\_glue\_job\_s3\_bucket\_script\_path) | Glue job script path in the S3 bucket | `string` | `null` | no | +| [glue\_version](#input\_glue\_version) | The version of Glue to use | `string` | `"2.0"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [job\_description](#input\_job\_description) | Glue job description | `string` | `null` | no | +| [job\_name](#input\_job\_name) | Glue job name. If not provided, the name will be generated from the context | `string` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [max\_capacity](#input\_max\_capacity) | The maximum number of AWS Glue data processing units (DPUs) that can be allocated when the job runs. Required when `pythonshell` is set, accept either 0.0625 or 1.0. Use `number_of_workers` and `worker_type` arguments instead with `glue_version` 2.0 and above | `number` | `null` | no | +| [max\_retries](#input\_max\_retries) | The maximum number of times to retry the job if it fails | `number` | `null` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [non\_overridable\_arguments](#input\_non\_overridable\_arguments) | Non-overridable arguments for this job, specified as name-value pairs | `map(string)` | `null` | no | +| [notification\_property](#input\_notification\_property) | Notification property of the job |
object({
# After a job run starts, the number of minutes to wait before sending a job run delay notification
notify_delay_after = number
})
| `null` | no | +| [number\_of\_workers](#input\_number\_of\_workers) | The number of workers of a defined `worker_type` that are allocated when a job runs | `number` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [security\_configuration](#input\_security\_configuration) | The name of the Security Configuration to be associated with the job | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [timeout](#input\_timeout) | The job timeout in minutes. The default is 2880 minutes (48 hours) for `glueetl` and `pythonshell` jobs, and `null` (unlimited) for `gluestreaming` jobs | `number` | `2880` | no | +| [worker\_type](#input\_worker\_type) | The type of predefined worker that is allocated when a job runs. Accepts a value of `Standard`, `G.1X`, or `G.2X` | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [job\_arn](#output\_job\_arn) | Glue job ARN | +| [job\_id](#output\_job\_id) | Glue job ID | +| [job\_name](#output\_job\_name) | Glue job name | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/glue/job) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/glue/job/context.tf b/modules/glue/job/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/glue/job/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/glue/job/iam.tf b/modules/glue/job/iam.tf new file mode 100644 index 000000000..94c3f1725 --- /dev/null +++ b/modules/glue/job/iam.tf @@ -0,0 +1,78 @@ +data "aws_iam_policy_document" "glue_job_aws_tools_access" { + count = local.enabled ? 1 : 0 + + statement { + sid = "S3BucketAccess" + effect = "Allow" + actions = [ + "s3:ListBucket", + "s3:GetObject", + "s3:PutObject", + "S3:GetBucketAcl", + "s3:PutObjectAcl" + ] + resources = ["*"] + } + + statement { + sid = "ParamStoreReadAccess" + effect = "Allow" + actions = [ + "ssm:GetParameter" + ] + resources = ["*"] + } + + statement { + sid = "SecretsManagerReadAccess" + effect = "Allow" + actions = [ + "secretsmanager:GetResourcePolicy", + "secretsmanager:GetSecretValue", + "secretsmanager:DescribeSecret", + "secretsmanager:ListSecretVersionIds", + "secretsmanager:ListSecrets" + ] + resources = ["*"] + } + + statement { + sid = "DynamoDBTableAccess" + effect = "Allow" + actions = [ + "dynamodb:BatchGetItem", + "dynamodb:BatchWriteItem", + "dynamodb:ConditionCheckItem", + "dynamodb:PutItem", + "dynamodb:DescribeTable", + "dynamodb:DeleteItem", + "dynamodb:GetItem", + "dynamodb:Scan", + "dynamodb:Query", + "dynamodb:UpdateItem" + ] + resources = ["*"] + } +} + +resource "aws_iam_policy" "glue_job_aws_tools_access" { + count = local.enabled ? 1 : 0 + + name = "${module.this.id}-custom-access" + description = "Policy for Glue jobs to interact with S3 buckets, SSM, Systems Manager Parameter Store, DynamoDB tables and Lambda Functions" + policy = one(data.aws_iam_policy_document.glue_job_aws_tools_access.*.json) +} + +resource "aws_iam_role_policy_attachment" "glue_jobs_aws_tools_access" { + count = local.enabled ? 1 : 0 + + role = local.glue_iam_role_name + policy_arn = one(aws_iam_policy.glue_job_aws_tools_access.*.arn) +} + +resource "aws_iam_role_policy_attachment" "glue_redshift_access" { + count = local.enabled ? 
1 : 0 + + role = local.glue_iam_role_name + policy_arn = "arn:aws:iam::aws:policy/AmazonRedshiftFullAccess" +} diff --git a/modules/glue/job/main.tf b/modules/glue/job/main.tf new file mode 100644 index 000000000..337c2d240 --- /dev/null +++ b/modules/glue/job/main.tf @@ -0,0 +1,36 @@ +locals { + enabled = module.this.enabled + + glue_iam_role_arn = module.glue_iam_role.outputs.role_arn + glue_iam_role_name = module.glue_iam_role.outputs.role_name + + command = var.command != null ? var.command : { + name = var.glue_job_command_name + script_location = format("s3://%s/%s", module.glue_job_s3_bucket.outputs.bucket_id, var.glue_job_s3_bucket_script_path) + python_version = var.glue_job_command_python_version + } +} + +module "glue_job" { + source = "cloudposse/glue/aws//modules/glue-job" + version = "0.4.0" + + job_name = var.job_name + job_description = var.job_description + role_arn = local.glue_iam_role_arn + connections = var.connections + glue_version = var.glue_version + default_arguments = var.default_arguments + non_overridable_arguments = var.non_overridable_arguments + security_configuration = var.security_configuration + timeout = var.timeout + max_capacity = var.max_capacity + max_retries = var.max_retries + worker_type = var.worker_type + number_of_workers = var.number_of_workers + command = local.command + execution_property = var.execution_property + notification_property = var.notification_property + + context = module.this.context +} diff --git a/modules/glue/job/outputs.tf b/modules/glue/job/outputs.tf new file mode 100644 index 000000000..5c6e7647a --- /dev/null +++ b/modules/glue/job/outputs.tf @@ -0,0 +1,14 @@ +output "job_id" { + description = "Glue job ID" + value = module.glue_job.id +} + +output "job_name" { + description = "Glue job name" + value = module.glue_job.name +} + +output "job_arn" { + description = "Glue job ARN" + value = module.glue_job.arn +} diff --git a/modules/glue/job/providers.tf b/modules/glue/job/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/glue/job/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/glue/job/remote-state.tf b/modules/glue/job/remote-state.tf new file mode 100644 index 000000000..e69f398dc --- /dev/null +++ b/modules/glue/job/remote-state.tf @@ -0,0 +1,26 @@ +module "glue_iam_role" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.glue_iam_component_name + + context = module.this.context +} + +module "glue_job_s3_bucket" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.glue_job_s3_bucket_component_name + bypass = var.glue_job_s3_bucket_component_name == null + + defaults = { + bucket_id = null + bucket_arn = null + bucket_domain_name = null + bucket_regional_domain_name = null + bucket_region = null + } + + context = module.this.context +} diff --git a/modules/glue/job/variables.tf b/modules/glue/job/variables.tf new file mode 100644 index 000000000..af205769f --- /dev/null +++ b/modules/glue/job/variables.tf @@ -0,0 +1,142 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "job_name" { + type = string + description = "Glue job name. If not provided, the name will be generated from the context" + default = null +} + +variable "job_description" { + type = string + description = "Glue job description" + default = null +} + +variable "connections" { + type = list(string) + description = "The list of connections used for this job" + default = null +} + +variable "glue_version" { + type = string + description = "The version of Glue to use" + default = "2.0" +} + +variable "default_arguments" { + type = map(string) + description = "The map of default arguments for the job. You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes" + default = null +} + +variable "non_overridable_arguments" { + type = map(string) + description = "Non-overridable arguments for this job, specified as name-value pairs" + default = null +} + +variable "security_configuration" { + type = string + description = "The name of the Security Configuration to be associated with the job" + default = null +} + +variable "timeout" { + type = number + description = "The job timeout in minutes. The default is 2880 minutes (48 hours) for `glueetl` and `pythonshell` jobs, and `null` (unlimited) for `gluestreaming` jobs" + default = 2880 +} + +variable "max_capacity" { + type = number + description = "The maximum number of AWS Glue data processing units (DPUs) that can be allocated when the job runs. Required when `pythonshell` is set, accept either 0.0625 or 1.0. Use `number_of_workers` and `worker_type` arguments instead with `glue_version` 2.0 and above" + default = null +} + +variable "max_retries" { + type = number + description = " The maximum number of times to retry the job if it fails" + default = null +} + +variable "worker_type" { + type = string + description = "The type of predefined worker that is allocated when a job runs. Accepts a value of `Standard`, `G.1X`, or `G.2X`" + default = null +} + +variable "number_of_workers" { + type = number + description = "The number of workers of a defined `worker_type` that are allocated when a job runs" + default = null +} + +variable "command" { + # type = object({ + # # The name of the job command. 
Defaults to glueetl. + # # Use `pythonshell` for Python Shell Job Type, or `gluestreaming` for Streaming Job Type. + # # `max_capacity` needs to be set if `pythonshell` is chosen + # name = string + # # Specifies the S3 path to a script that executes the job + # script_location = string + # # The Python version being used to execute a Python shell job. Allowed values are 2 or 3 + # python_version = number + # }) + + # Using `type = map(any)` since some of the the fields are optional and we don't want to force the caller to specify all of them and set to `null` those not used + type = map(any) + description = "The command of the job" + default = null +} + +variable "execution_property" { + type = object({ + # The maximum number of concurrent runs allowed for the job. The default is 1. + max_concurrent_runs = number + }) + description = "Execution property of the job" + default = null +} + +variable "notification_property" { + type = object({ + # After a job run starts, the number of minutes to wait before sending a job run delay notification + notify_delay_after = number + }) + description = "Notification property of the job" + default = null +} + +variable "glue_iam_component_name" { + type = string + description = "Glue IAM component name. Used to get the Glue IAM role from the remote state" + default = "glue/iam" +} + +variable "glue_job_s3_bucket_component_name" { + type = string + description = "Glue job S3 bucket component name. Used to get the remote state of the S3 bucket where the Glue job script is located" + default = null +} + +variable "glue_job_s3_bucket_script_path" { + type = string + description = "Glue job script path in the S3 bucket" + default = null +} + +variable "glue_job_command_name" { + type = string + description = "The name of the job command. Defaults to glueetl. Use pythonshell for Python Shell Job Type, or gluestreaming for Streaming Job Type. max_capacity needs to be set if pythonshell is chosen" + default = "glueetl" +} + +variable "glue_job_command_python_version" { + type = number + description = "The Python version being used to execute a Python shell job. Allowed values are 2, 3 or 3.9. Version 3 refers to Python 3.6" + default = 3 +} diff --git a/modules/glue/job/versions.tf b/modules/glue/job/versions.tf new file mode 100644 index 000000000..f4c416ee6 --- /dev/null +++ b/modules/glue/job/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.3.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + utils = { + source = "cloudposse/utils" + version = ">= 1.15.0" + } + } +} diff --git a/modules/glue/registry/README.md b/modules/glue/registry/README.md new file mode 100644 index 000000000..0ad49a19d --- /dev/null +++ b/modules/glue/registry/README.md @@ -0,0 +1,96 @@ +--- +tags: + - component/glue/registry + - layer/unassigned + - provider/aws +--- + +# Component: `glue/registry` + +This component provisions Glue registries. + +## Usage + +**Stack Level**: Regional + +```yaml +components: + terraform: + glue/registry/example: + metadata: + component: glue/registry + vars: + enabled: true + name: example + registry_name: example + registry_description: "Glue registry example" +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [utils](#requirement\_utils) | >= 1.15.0 | + +## Providers + +No providers. 
+ +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [glue\_registry](#module\_glue\_registry) | cloudposse/glue/aws//modules/glue-registry | 0.4.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [registry\_description](#input\_registry\_description) | Glue registry description | `string` | `null` | no | +| [registry\_name](#input\_registry\_name) | Glue registry name. If not provided, the name will be generated from the context | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [registry\_arn](#output\_registry\_arn) | Glue registry ARN | +| [registry\_id](#output\_registry\_id) | Glue registry ID | +| [registry\_name](#output\_registry\_name) | Glue registry name | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/glue/registry) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/glue/registry/context.tf b/modules/glue/registry/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/glue/registry/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. 
+ See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. 
+ EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/glue/registry/main.tf b/modules/glue/registry/main.tf new file mode 100644 index 000000000..ae4e7c00a --- /dev/null +++ b/modules/glue/registry/main.tf @@ -0,0 +1,9 @@ +module "glue_registry" { + source = "cloudposse/glue/aws//modules/glue-registry" + version = "0.4.0" + + registry_name = var.registry_name + registry_description = var.registry_description + + context = module.this.context +} diff --git a/modules/glue/registry/outputs.tf b/modules/glue/registry/outputs.tf new file mode 100644 index 000000000..b0a6b01e4 --- /dev/null +++ b/modules/glue/registry/outputs.tf @@ -0,0 +1,14 @@ +output "registry_id" { + description = "Glue registry ID" + value = module.glue_registry.id +} + +output "registry_name" { + description = "Glue registry name" + value = module.glue_registry.name +} + +output "registry_arn" { + description = "Glue registry ARN" + value = module.glue_registry.arn +} diff --git a/modules/glue/registry/providers.tf b/modules/glue/registry/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/glue/registry/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/glue/registry/variables.tf b/modules/glue/registry/variables.tf new file mode 100644 index 000000000..0f7216417 --- /dev/null +++ b/modules/glue/registry/variables.tf @@ -0,0 +1,16 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "registry_name" { + type = string + description = "Glue registry name. If not provided, the name will be generated from the context" + default = null +} + +variable "registry_description" { + type = string + description = "Glue registry description" + default = null +} diff --git a/modules/glue/registry/versions.tf b/modules/glue/registry/versions.tf new file mode 100644 index 000000000..f4c416ee6 --- /dev/null +++ b/modules/glue/registry/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.3.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + utils = { + source = "cloudposse/utils" + version = ">= 1.15.0" + } + } +} diff --git a/modules/glue/schema/README.md b/modules/glue/schema/README.md new file mode 100644 index 000000000..d0bdb857e --- /dev/null +++ b/modules/glue/schema/README.md @@ -0,0 +1,107 @@ +--- +tags: + - component/glue/schema + - layer/unassigned + - provider/aws +--- + +# Component: `glue/schema` + +This component provisions Glue schemas. 
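+
+In addition to the usage example below, the component can register an inline schema via `schema_definition`; the target
+registry is resolved from the `glue/registry` component's remote state through `glue_registry_component_name`. A rough
+sketch only (the JSON payload and component names are illustrative):
+
+```yaml
+components:
+  terraform:
+    glue/schema/orders:
+      metadata:
+        component: glue/schema
+      vars:
+        enabled: true
+        name: orders
+        schema_name: orders
+        data_format: JSON
+        compatibility: BACKWARD
+        glue_registry_component_name: "glue/registry/example"
+        # Illustrative schema document; replace with your actual definition
+        schema_definition: '{"type": "object", "properties": {"order_id": {"type": "string"}}}'
+```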
+ +## Usage + +**Stack Level**: Regional + +```yaml +components: + terraform: + glue/schema/example: + metadata: + component: glue/schema + vars: + enabled: true + name: example + schema_name: example + schema_description: "Glue schema example" + data_format: JSON + glue_registry_component_name: "glue/registry/example" +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [utils](#requirement\_utils) | >= 1.15.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [glue\_registry](#module\_glue\_registry) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [glue\_schema](#module\_glue\_schema) | cloudposse/glue/aws//modules/glue-schema | 0.4.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [compatibility](#input\_compatibility) | The compatibility mode of the schema. Valid values are NONE, DISABLED, BACKWARD, BACKWARD\_ALL, FORWARD, FORWARD\_ALL, FULL, and FULL\_ALL | `string` | `"NONE"` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [data\_format](#input\_data\_format) | The data format of the schema definition. Valid values are `AVRO`, `JSON` and `PROTOBUF` | `string` | `"JSON"` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [glue\_registry\_component\_name](#input\_glue\_registry\_component\_name) | Glue registry component name. Used to get the Glue registry from the remote state | `string` | n/a | yes | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [schema\_definition](#input\_schema\_definition) | The schema definition using the `data_format` setting | `string` | `null` | no | +| [schema\_description](#input\_schema\_description) | Glue schema description | `string` | `null` | no | +| [schema\_name](#input\_schema\_name) | Glue schema name. If not provided, the name will be generated from the context | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [latest\_schema\_version](#output\_latest\_schema\_version) | The latest version of the schema associated with the returned schema definition | +| [next\_schema\_version](#output\_next\_schema\_version) | The next version of the schema associated with the returned schema definition | +| [registry\_name](#output\_registry\_name) | Glue registry name | +| [schema\_arn](#output\_schema\_arn) | Glue schema ARN | +| [schema\_checkpoint](#output\_schema\_checkpoint) | The version number of the checkpoint (the last time the compatibility mode was changed) | +| [schema\_id](#output\_schema\_id) | Glue schema ID | +| [schema\_name](#output\_schema\_name) | Glue schema name | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/glue/schema) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/glue/schema/context.tf b/modules/glue/schema/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/glue/schema/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/glue/schema/main.tf b/modules/glue/schema/main.tf new file mode 100644 index 000000000..aa836076f --- /dev/null +++ b/modules/glue/schema/main.tf @@ -0,0 +1,13 @@ +module "glue_schema" { + source = "cloudposse/glue/aws//modules/glue-schema" + version = "0.4.0" + + schema_name = var.schema_name + schema_description = var.schema_description + registry_arn = module.glue_registry.outputs.registry_arn + data_format = var.data_format + compatibility = var.compatibility + schema_definition = var.schema_definition + + context = module.this.context +} diff --git a/modules/glue/schema/outputs.tf b/modules/glue/schema/outputs.tf new file mode 100644 index 000000000..12cc3a666 --- /dev/null +++ b/modules/glue/schema/outputs.tf @@ -0,0 +1,34 @@ +output "schema_id" { + description = "Glue schema ID" + value = module.glue_schema.id +} + +output "schema_name" { + description = "Glue schema name" + value = module.glue_schema.name +} + +output "schema_arn" { + description = "Glue schema ARN" + value = module.glue_schema.arn +} + +output "registry_name" { + description = "Glue registry name" + value = module.glue_schema.registry_name +} + +output "latest_schema_version" { + description = "The latest version of the schema associated with the returned schema definition" + value = module.glue_schema.latest_schema_version +} + +output "next_schema_version" { + description = "The next version of the schema associated with the returned schema definition" + value = module.glue_schema.next_schema_version +} + +output "schema_checkpoint" { + description = "The version number of the checkpoint (the last time the compatibility mode was changed)" + value = module.glue_schema.schema_checkpoint +} diff --git a/modules/glue/schema/providers.tf b/modules/glue/schema/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/glue/schema/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. 
When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/glue/schema/remote-state.tf b/modules/glue/schema/remote-state.tf new file mode 100644 index 000000000..51975c614 --- /dev/null +++ b/modules/glue/schema/remote-state.tf @@ -0,0 +1,8 @@ +module "glue_registry" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.glue_registry_component_name + + context = module.this.context +} diff --git a/modules/glue/schema/variables.tf b/modules/glue/schema/variables.tf new file mode 100644 index 000000000..786f64504 --- /dev/null +++ b/modules/glue/schema/variables.tf @@ -0,0 +1,49 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "schema_name" { + type = string + description = "Glue schema name. If not provided, the name will be generated from the context" + default = null +} + +variable "schema_description" { + type = string + description = "Glue schema description" + default = null +} + +variable "data_format" { + type = string + description = "The data format of the schema definition. Valid values are `AVRO`, `JSON` and `PROTOBUF`" + default = "JSON" + + validation { + condition = contains(["AVRO", "JSON", "PROTOBUF"], var.data_format) + error_message = "Supported options are AVRO, JSON or PROTOBUF" + } +} + +variable "compatibility" { + type = string + description = "The compatibility mode of the schema. Valid values are NONE, DISABLED, BACKWARD, BACKWARD_ALL, FORWARD, FORWARD_ALL, FULL, and FULL_ALL" + default = "NONE" + + validation { + condition = contains(["NONE", "DISABLED", "BACKWARD", "BACKWARD_ALL", "FORWARD", "FORWARD_ALL", "FULL", "FULL_ALL"], var.compatibility) + error_message = "Supported options are NONE, DISABLED, BACKWARD, BACKWARD_ALL, FORWARD, FORWARD_ALL, FULL, and FULL_ALL" + } +} + +variable "schema_definition" { + type = string + description = "The schema definition using the `data_format` setting" + default = null +} + +variable "glue_registry_component_name" { + type = string + description = "Glue registry component name. Used to get the Glue registry from the remote state" +} diff --git a/modules/glue/schema/versions.tf b/modules/glue/schema/versions.tf new file mode 100644 index 000000000..f4c416ee6 --- /dev/null +++ b/modules/glue/schema/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.3.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + utils = { + source = "cloudposse/utils" + version = ">= 1.15.0" + } + } +} diff --git a/modules/glue/trigger/README.md b/modules/glue/trigger/README.md new file mode 100644 index 000000000..c9ba1b6ee --- /dev/null +++ b/modules/glue/trigger/README.md @@ -0,0 +1,115 @@ +--- +tags: + - component/glue/trigger + - layer/unassigned + - provider/aws +--- + +# Component: `glue/trigger` + +This component provisions Glue triggers. 
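+
+The usage example below configures a `SCHEDULED` trigger. For a `CONDITIONAL` trigger, a `predicate` is required; the
+value is passed through to the upstream `cloudposse/glue/aws//modules/glue-trigger` module. Assuming that module accepts
+a predicate shaped like the `aws_glue_trigger` predicate block (a `conditions` list with `job_name` and `state`), a rough
+sketch could look like this (component and job names are illustrative):
+
+```yaml
+components:
+  terraform:
+    glue/trigger/conditional-example:
+      metadata:
+        component: glue/trigger
+      vars:
+        enabled: true
+        name: conditional-example
+        type: CONDITIONAL
+        glue_workflow_component_name: "glue/workflow/example"
+        glue_job_component_name: "glue/job/example"
+        predicate:
+          conditions:
+            # Illustrative upstream job name and the state that fires the trigger
+            - job_name: "example-upstream-job"
+              state: "SUCCEEDED"
+```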
+ +## Usage + +**Stack Level**: Regional + +```yaml +components: + terraform: + glue/trigger/example: + metadata: + component: glue/trigger + vars: + enabled: true + name: example + trigger_name: example + trigger_description: "Glue trigger example" + glue_workflow_component_name: "glue/workflow/example" + glue_job_component_name: "glue/job/example" + glue_job_timeout: 10 + trigger_enabled: true + start_on_creation: true + schedule: "cron(15 12 * * ? *)" + type: SCHEDULED +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [utils](#requirement\_utils) | >= 1.15.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [glue\_job](#module\_glue\_job) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [glue\_trigger](#module\_glue\_trigger) | cloudposse/glue/aws//modules/glue-trigger | 0.4.0 | +| [glue\_workflow](#module\_glue\_workflow) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [actions](#input\_actions) | List of actions initiated by the trigger when it fires | `list(any)` | `null` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [event\_batching\_condition](#input\_event\_batching\_condition) | Batch condition that must be met (specified number of events received or batch time window expired) before EventBridge event trigger fires | `map(number)` | `null` | no | +| [glue\_job\_component\_name](#input\_glue\_job\_component\_name) | Glue job component name. Used to get the Glue job from the remote state | `string` | `null` | no | +| [glue\_job\_timeout](#input\_glue\_job\_timeout) | The job run timeout in minutes. It overrides the timeout value of the job | `number` | `null` | no | +| [glue\_workflow\_component\_name](#input\_glue\_workflow\_component\_name) | Glue workflow component name. Used to get the Glue workflow from the remote state | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).<br>
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [predicate](#input\_predicate) | A predicate to specify when the new trigger should fire. Required when trigger type is `CONDITIONAL` | `any` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [schedule](#input\_schedule) | Cron formatted schedule. Required for triggers with type `SCHEDULED` | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [start\_on\_creation](#input\_start\_on\_creation) | Set to `true` to start `SCHEDULED` and `CONDITIONAL` triggers when created. `true` is not supported for `ON_DEMAND` triggers | `bool` | `true` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [trigger\_description](#input\_trigger\_description) | Glue trigger description | `string` | `null` | no | +| [trigger\_enabled](#input\_trigger\_enabled) | Whether to start the created trigger | `bool` | `true` | no | +| [trigger\_name](#input\_trigger\_name) | Glue trigger name. If not provided, the name will be generated from the context | `string` | `null` | no | +| [type](#input\_type) | The type of trigger. Options are CONDITIONAL, SCHEDULED or ON\_DEMAND | `string` | `"CONDITIONAL"` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [trigger\_arn](#output\_trigger\_arn) | Glue trigger ARN | +| [trigger\_id](#output\_trigger\_id) | Glue trigger ID | +| [trigger\_name](#output\_trigger\_name) | Glue trigger name | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/glue/trigger) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/glue/trigger/context.tf b/modules/glue/trigger/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/glue/trigger/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/glue/trigger/main.tf b/modules/glue/trigger/main.tf new file mode 100644 index 000000000..c6e9dc5c1 --- /dev/null +++ b/modules/glue/trigger/main.tf @@ -0,0 +1,27 @@ +locals { + actions = var.actions != null ? var.actions : [ + { + job_name = module.glue_job.outputs.job_name + # The job run timeout in minutes. It overrides the timeout value of the job + timeout = var.glue_job_timeout + } + ] +} + +module "glue_trigger" { + source = "cloudposse/glue/aws//modules/glue-trigger" + version = "0.4.0" + + trigger_name = var.trigger_name + trigger_description = var.trigger_description + workflow_name = module.glue_workflow.outputs.workflow_name + type = var.type + actions = local.actions + predicate = var.predicate + event_batching_condition = var.event_batching_condition + schedule = var.schedule + trigger_enabled = var.trigger_enabled + start_on_creation = var.start_on_creation + + context = module.this.context +} diff --git a/modules/glue/trigger/outputs.tf b/modules/glue/trigger/outputs.tf new file mode 100644 index 000000000..d850f4f6b --- /dev/null +++ b/modules/glue/trigger/outputs.tf @@ -0,0 +1,14 @@ +output "trigger_id" { + description = "Glue trigger ID" + value = module.glue_trigger.id +} + +output "trigger_name" { + description = "Glue trigger name" + value = module.glue_trigger.name +} + +output "trigger_arn" { + description = "Glue trigger ARN" + value = module.glue_trigger.arn +} diff --git a/modules/glue/trigger/providers.tf b/modules/glue/trigger/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/glue/trigger/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
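+ # compact() removes null and empty values, so for_each iterates over either zero or one ARN,
+ # and the assume_role block is rendered only when a role ARN is actually configured.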
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/glue/trigger/remote-state.tf b/modules/glue/trigger/remote-state.tf new file mode 100644 index 000000000..c5a1fcfc5 --- /dev/null +++ b/modules/glue/trigger/remote-state.tf @@ -0,0 +1,31 @@ +module "glue_workflow" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.glue_workflow_component_name + bypass = var.glue_workflow_component_name == null + + defaults = { + workflow_id = null + workflow_name = null + workflow_arn = null + } + + context = module.this.context +} + +module "glue_job" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.glue_job_component_name + bypass = var.glue_job_component_name == null + + defaults = { + job_id = null + job_name = null + job_arn = null + } + + context = module.this.context +} diff --git a/modules/glue/trigger/variables.tf b/modules/glue/trigger/variables.tf new file mode 100644 index 000000000..39c7f5abc --- /dev/null +++ b/modules/glue/trigger/variables.tf @@ -0,0 +1,107 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "trigger_name" { + type = string + description = "Glue trigger name. If not provided, the name will be generated from the context" + default = null +} + +variable "trigger_description" { + type = string + description = "Glue trigger description" + default = null +} + +variable "type" { + type = string + description = "The type of trigger. Options are CONDITIONAL, SCHEDULED or ON_DEMAND" + default = "CONDITIONAL" + + validation { + condition = contains(["CONDITIONAL", "SCHEDULED", "ON_DEMAND"], var.type) + error_message = "Supported options are CONDITIONAL, SCHEDULED or ON_DEMAND" + } +} + +variable "predicate" { + # type = object({ + # # How to handle multiple conditions. Defaults to `AND`. Valid values are `AND` or `ANY` + # logical = string + # # Conditions for activating the trigger. Required for triggers where type is `CONDITIONAL` + # conditions = list(object({ + # job_name = string + # crawler_name = string + # state = string + # crawl_state = string + # logical_operator = string + # })) + # }) + type = any + description = "A predicate to specify when the new trigger should fire. Required when trigger type is `CONDITIONAL`" + default = null +} + +variable "event_batching_condition" { + # type = object({ + # batch_size = number + # batch_window = number + # }) + type = map(number) + description = "Batch condition that must be met (specified number of events received or batch time window expired) before EventBridge event trigger fires" + default = null +} + +variable "schedule" { + type = string + description = "Cron formatted schedule. Required for triggers with type `SCHEDULED`" + default = null +} + +variable "trigger_enabled" { + type = bool + description = "Whether to start the created trigger" + default = true +} + +variable "start_on_creation" { + type = bool + description = "Set to `true` to start `SCHEDULED` and `CONDITIONAL` triggers when created. `true` is not supported for `ON_DEMAND` triggers" + default = true +} + +variable "glue_workflow_component_name" { + type = string + description = "Glue workflow component name. 
Used to get the Glue workflow from the remote state" + default = null +} + +variable "glue_job_component_name" { + type = string + description = "Glue workflow job name. Used to get the Glue job from the remote state" + default = null +} + +variable "glue_job_timeout" { + type = number + description = "The job run timeout in minutes. It overrides the timeout value of the job" + default = null +} + +variable "actions" { + # type = list(object({ + # job_name = string + # crawler_name = string + # arguments = map(string) + # security_configuration = string + # notification_property = object({ + # notify_delay_after = number + # }) + # timeout = number + # })) + type = list(any) + description = "List of actions initiated by the trigger when it fires" + default = null +} diff --git a/modules/glue/trigger/versions.tf b/modules/glue/trigger/versions.tf new file mode 100644 index 000000000..f4c416ee6 --- /dev/null +++ b/modules/glue/trigger/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.3.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + utils = { + source = "cloudposse/utils" + version = ">= 1.15.0" + } + } +} diff --git a/modules/glue/workflow/README.md b/modules/glue/workflow/README.md new file mode 100644 index 000000000..576ed5c3b --- /dev/null +++ b/modules/glue/workflow/README.md @@ -0,0 +1,98 @@ +--- +tags: + - component/glue/workflow + - layer/unassigned + - provider/aws +--- + +# Component: `glue/workflow` + +This component provisions Glue workflows. + +## Usage + +**Stack Level**: Regional + +```yaml +components: + terraform: + glue/workflow/example: + metadata: + component: "glue/workflow" + vars: + enabled: true + name: example + workflow_name: example + workflow_description: "Glue workflow example" +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [utils](#requirement\_utils) | >= 1.15.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [glue\_workflow](#module\_glue\_workflow) | cloudposse/glue/aws//modules/glue-workflow | 0.4.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [default\_run\_properties](#input\_default\_run\_properties) | A map of default run properties for this workflow. These properties are passed to all jobs associated to the workflow | `map(string)` | `null` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [max\_concurrent\_runs](#input\_max\_concurrent\_runs) | Maximum number of concurrent runs. If you leave this parameter blank, there is no limit to the number of concurrent workflow runs | `number` | `null` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [workflow\_description](#input\_workflow\_description) | Glue workflow description | `string` | `null` | no | +| [workflow\_name](#input\_workflow\_name) | Glue workflow name. If not provided, the name will be generated from the context | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [workflow\_arn](#output\_workflow\_arn) | Glue workflow ARN | +| [workflow\_id](#output\_workflow\_id) | Glue workflow ID | +| [workflow\_name](#output\_workflow\_name) | Glue workflow name | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/glue/workflow) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/glue/workflow/context.tf b/modules/glue/workflow/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/glue/workflow/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/glue/workflow/main.tf b/modules/glue/workflow/main.tf new file mode 100644 index 000000000..66499ae4d --- /dev/null +++ b/modules/glue/workflow/main.tf @@ -0,0 +1,11 @@ +module "glue_workflow" { + source = "cloudposse/glue/aws//modules/glue-workflow" + version = "0.4.0" + + workflow_name = var.workflow_name + workflow_description = var.workflow_description + default_run_properties = var.default_run_properties + max_concurrent_runs = var.max_concurrent_runs + + context = module.this.context +} diff --git a/modules/glue/workflow/outputs.tf b/modules/glue/workflow/outputs.tf new file mode 100644 index 000000000..d6b4779ad --- /dev/null +++ b/modules/glue/workflow/outputs.tf @@ -0,0 +1,14 @@ +output "workflow_id" { + description = "Glue workflow ID" + value = module.glue_workflow.id +} + +output "workflow_name" { + description = "Glue workflow name" + value = module.glue_workflow.name +} + +output "workflow_arn" { + description = "Glue workflow ARN" + value = module.glue_workflow.arn +} diff --git a/modules/glue/workflow/providers.tf b/modules/glue/workflow/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/glue/workflow/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
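+ # compact() yields an empty list when the role ARN is null, so no assume_role block is rendered in that case.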
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/glue/workflow/variables.tf b/modules/glue/workflow/variables.tf new file mode 100644 index 000000000..5904f2b05 --- /dev/null +++ b/modules/glue/workflow/variables.tf @@ -0,0 +1,28 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "workflow_name" { + type = string + description = "Glue workflow name. If not provided, the name will be generated from the context" + default = null +} + +variable "workflow_description" { + type = string + description = "Glue workflow description" + default = null +} + +variable "default_run_properties" { + type = map(string) + description = "A map of default run properties for this workflow. These properties are passed to all jobs associated to the workflow" + default = null +} + +variable "max_concurrent_runs" { + type = number + description = "Maximum number of concurrent runs. If you leave this parameter blank, there is no limit to the number of concurrent workflow runs" + default = null +} diff --git a/modules/glue/workflow/versions.tf b/modules/glue/workflow/versions.tf new file mode 100644 index 000000000..f4c416ee6 --- /dev/null +++ b/modules/glue/workflow/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.3.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + utils = { + source = "cloudposse/utils" + version = ">= 1.15.0" + } + } +} diff --git a/modules/guardduty/README.md b/modules/guardduty/README.md new file mode 100644 index 000000000..d0be2016f --- /dev/null +++ b/modules/guardduty/README.md @@ -0,0 +1,248 @@ +--- +tags: + - component/guardduty + - layer/security-and-compliance + - provider/aws +--- + +# Component: `guardduty` + +This component is responsible for configuring GuardDuty within an AWS Organization. + +AWS GuardDuty is a managed threat detection service. It is designed to help protect AWS accounts and workloads by +continuously monitoring for malicious activities and unauthorized behaviors. To detect potential security threats, +GuardDuty analyzes various data sources within your AWS environment, such as AWS CloudTrail logs, VPC Flow Logs, and DNS +logs. + +Key features and components of AWS GuardDuty include: + +- Threat detection: GuardDuty employs machine learning algorithms, anomaly detection, and integrated threat intelligence + to identify suspicious activities, unauthorized access attempts, and potential security threats. It analyzes event + logs and network traffic data to detect patterns, anomalies, and known attack techniques. + +- Threat intelligence: GuardDuty leverages threat intelligence feeds from AWS, trusted partners, and the global + community to enhance its detection capabilities. It uses this intelligence to identify known malicious IP addresses, + domains, and other indicators of compromise. + +- Real-time alerts: When GuardDuty identifies a potential security issue, it generates real-time alerts that can be + delivered through AWS CloudWatch Events. These alerts can be integrated with other AWS services like Amazon SNS or AWS + Lambda for immediate action or custom response workflows. + +- Multi-account support: GuardDuty can be enabled across multiple AWS accounts, allowing centralized management and + monitoring of security across an entire organization's AWS infrastructure. 
This helps to maintain consistent security + policies and practices. + +- Automated remediation: GuardDuty integrates with other AWS services, such as AWS Macie, AWS Security Hub, and AWS + Systems Manager, to facilitate automated threat response and remediation actions. This helps to minimize the impact of + security incidents and reduces the need for manual intervention. + +- Security findings and reports: GuardDuty provides detailed security findings and reports that include information + about detected threats, affected AWS resources, and recommended remediation actions. These findings can be accessed + through the AWS Management Console or retrieved via APIs for further analysis and reporting. + +GuardDuty offers a scalable and flexible approach to threat detection within AWS environments, providing organizations +with an additional layer of security to proactively identify and respond to potential security risks. + +## Usage + +**Stack Level**: Regional + +## Deployment Overview + +This component is complex in that it must be deployed multiple times with different variables set to configure the AWS +Organization successfully. + +It is further complicated by the fact that you must deploy each of the the component instances described below to every +region that existed before March 2019 and to any regions that have been opted-in as described in the +[AWS Documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-regions). + +In the examples below, we assume that the AWS Organization Management account is `root` and the AWS Organization +Delegated Administrator account is `security`, both in the `core` tenant. + +### Deploy to Delegated Administrator Account + +First, the component is deployed to the +[Delegated Administrator](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_organizations.html) account in each +region in order to configure the central GuardDuty detector that each account will send its findings to. + +```yaml +# core-ue1-security +components: + terraform: + guardduty/delegated-administrator/ue1: + metadata: + component: guardduty + vars: + enabled: true + delegated_administrator_account_name: core-security + environment: ue1 + region: us-east-1 +``` + +```bash +atmos terraform apply guardduty/delegated-administrator/ue1 -s core-ue1-security +atmos terraform apply guardduty/delegated-administrator/ue2 -s core-ue2-security +atmos terraform apply guardduty/delegated-administrator/uw1 -s core-uw1-security +# ... other regions +``` + +### Deploy to Organization Management (root) Account + +Next, the component is deployed to the AWS Organization Management, a/k/a `root`, Account in order to set the AWS +Organization Designated Administrator account. + +Note that you must use the `SuperAdmin` permissions as we are deploying to the AWS Organization Management account. Since +we are using the `SuperAdmin` user, it will already have access to the state bucket, so we set the `role_arn` of the +backend config to null and set `var.privileged` to `true`. 
+ +```yaml +# core-ue1-root +components: + terraform: + guardduty/root/ue1: + metadata: + component: guardduty + backend: + s3: + role_arn: null + vars: + enabled: true + delegated_administrator_account_name: core-security + environment: ue1 + region: us-east-1 + privileged: true +``` + +```bash +atmos terraform apply guardduty/root/ue1 -s core-ue1-root +atmos terraform apply guardduty/root/ue2 -s core-ue2-root +atmos terraform apply guardduty/root/uw1 -s core-uw1-root +# ... other regions +``` + +### Deploy Organization Settings in Delegated Administrator Account + +Finally, the component is deployed to the Delegated Administrator Account again in order to create the organization-wide +configuration for the AWS Organization, but with `var.admin_delegated` set to `true` to indicate that the delegation has +already been performed from the Organization Management account. + +```yaml +# core-ue1-security +components: + terraform: + guardduty/org-settings/ue1: + metadata: + component: guardduty + vars: + enabled: true + delegated_administrator_account_name: core-security + environment: use1 + region: us-east-1 + admin_delegated: true +``` + +```bash +atmos terraform apply guardduty/org-settings/ue1 -s core-ue1-security +atmos terraform apply guardduty/org-settings/ue2 -s core-ue2-security +atmos terraform apply guardduty/org-settings/uw1 -s core-uw1-security +# ... other regions +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 5.0 | +| [awsutils](#requirement\_awsutils) | >= 0.16.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 5.0 | +| [awsutils](#provider\_awsutils) | >= 0.16.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [guardduty](#module\_guardduty) | cloudposse/guardduty/aws | 0.5.0 | +| [guardduty\_delegated\_detector](#module\_guardduty\_delegated\_detector) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_guardduty_detector_feature.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_detector_feature) | resource | +| [aws_guardduty_organization_admin_account.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_organization_admin_account) | resource | +| [aws_guardduty_organization_configuration.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_organization_configuration) | resource | +| [awsutils_guardduty_organization_settings.this](https://registry.terraform.io/providers/cloudposse/awsutils/latest/docs/resources/guardduty_organization_settings) | resource | +| [aws_caller_identity.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [account\_map\_tenant](#input\_account\_map\_tenant) | The tenant where the `account_map` component required by remote-state is deployed | `string` | `"core"` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to 
each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [admin\_delegated](#input\_admin\_delegated) | A flag to indicate if the AWS Organization-wide settings should be created. This can only be done after the GuardDuty
Administrator account has already been delegated from the AWS Org Management account (usually 'root'). See the
Deployment section of the README for more information. | `bool` | `false` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [auto\_enable\_organization\_members](#input\_auto\_enable\_organization\_members) | Indicates the auto-enablement configuration of GuardDuty for the member accounts in the organization. Valid values are `ALL`, `NEW`, `NONE`.

For more information, see:
https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_organization_configuration#auto_enable_organization_members | `string` | `"NEW"` | no | +| [cloudwatch\_enabled](#input\_cloudwatch\_enabled) | Flag to indicate whether CloudWatch logging should be enabled for GuardDuty | `bool` | `false` | no | +| [cloudwatch\_event\_rule\_pattern\_detail\_type](#input\_cloudwatch\_event\_rule\_pattern\_detail\_type) | The detail-type pattern used to match events that will be sent to SNS.

For more information, see:
https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/CloudWatchEventsandEventPatterns.html
https://docs.aws.amazon.com/eventbridge/latest/userguide/event-types.html
https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings_cloudwatch.html | `string` | `"GuardDuty Finding"` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [create\_sns\_topic](#input\_create\_sns\_topic) | Flag to indicate whether an SNS topic should be created for notifications. If you want to send findings to a new SNS
topic, set this to true and provide a valid configuration for subscribers. | `bool` | `false` | no | +| [delegated\_admininstrator\_component\_name](#input\_delegated\_admininstrator\_component\_name) | The name of the component that created the GuardDuty detector. | `string` | `"guardduty/delegated-administrator"` | no | +| [delegated\_administrator\_account\_name](#input\_delegated\_administrator\_account\_name) | The name of the account that is the AWS Organization Delegated Administrator account | `string` | `"core-security"` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [detector\_features](#input\_detector\_features) | A map of detector features for streaming foundational data sources to detect communication with known malicious domains and IP addresses and identify anomalous behavior.

For more information, see:
https://docs.aws.amazon.com/guardduty/latest/ug/guardduty-features-activation-model.html#guardduty-features

feature\_name:
The name of the detector feature. Possible values include: S3\_DATA\_EVENTS, EKS\_AUDIT\_LOGS, EBS\_MALWARE\_PROTECTION, RDS\_LOGIN\_EVENTS, EKS\_RUNTIME\_MONITORING, LAMBDA\_NETWORK\_LOGS, RUNTIME\_MONITORING. Specifying both EKS Runtime Monitoring (EKS\_RUNTIME\_MONITORING) and Runtime Monitoring (RUNTIME\_MONITORING) will cause an error. You can add only one of these two features because Runtime Monitoring already includes the threat detection for Amazon EKS resources. For more information, see: https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorFeatureConfiguration.html.
status:
The status of the detector feature. Valid values include: ENABLED or DISABLED.
additional\_configuration:
Optional information about the additional configuration for a feature in your GuardDuty account. For more information, see: https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorAdditionalConfiguration.html.
addon\_name:
The name of the add-on for which the configuration applies. Possible values include: EKS\_ADDON\_MANAGEMENT, ECS\_FARGATE\_AGENT\_MANAGEMENT, and EC2\_AGENT\_MANAGEMENT. For more information, see: https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorAdditionalConfiguration.html.
status:
The status of the add-on. Valid values include: ENABLED or DISABLED. |
map(object({
feature_name = string
status = string
additional_configuration = optional(object({
addon_name = string
status = string
}), null)
}))
| `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [finding\_publishing\_frequency](#input\_finding\_publishing\_frequency) | The frequency of notifications sent for finding occurrences. If the detector is a GuardDuty member account, the value
is determined by the GuardDuty master account and cannot be modified; otherwise it defaults to SIX\_HOURS.

For standalone and GuardDuty master accounts, it must be configured in Terraform to enable drift detection.
Valid values for standalone and master accounts: FIFTEEN\_MINUTES, ONE\_HOUR, SIX\_HOURS.

For more information, see:
https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings_cloudwatch.html#guardduty_findings_cloudwatch_notification_frequency | `string` | `null` | no | +| [findings\_notification\_arn](#input\_findings\_notification\_arn) | The ARN for an SNS topic to send findings notifications to. This is only used if create\_sns\_topic is false.
If you want to send findings to an existing SNS topic, set this to the ARN of the existing topic and set
create\_sns\_topic to false. | `string` | `null` | no | +| [global\_environment](#input\_global\_environment) | Global environment name | `string` | `"gbl"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [kubernetes\_audit\_logs\_enabled](#input\_kubernetes\_audit\_logs\_enabled) | If `true`, enables Kubernetes audit logs as a data source for Kubernetes protection.

For more information, see:
https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_detector#audit_logs | `bool` | `false` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [malware\_protection\_scan\_ec2\_ebs\_volumes\_enabled](#input\_malware\_protection\_scan\_ec2\_ebs\_volumes\_enabled) | Configure whether Malware Protection is enabled as data source for EC2 instances EBS Volumes in GuardDuty.

For more information, see:
https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_detector#malware-protection | `bool` | `false` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [organization\_management\_account\_name](#input\_organization\_management\_account\_name) | The name of the AWS Organization management account | `string` | `null` | no | +| [privileged](#input\_privileged) | true if the default provider already has access to the backend | `bool` | `false` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [root\_account\_stage](#input\_root\_account\_stage) | The stage name for the Organization root (management) account. This is used to lookup account IDs from account names
using the `account-map` component. | `string` | `"root"` | no | +| [s3\_protection\_enabled](#input\_s3\_protection\_enabled) | If `true`, enables S3 protection.

For more information, see:
https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/guardduty_detector#s3-logs | `bool` | `true` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [subscribers](#input\_subscribers) | A map of subscription configurations for SNS topics

For more information, see:
https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sns_topic_subscription#argument-reference

protocol:
The protocol to use. The possible values are: sqs, sms, lambda, application. (http and https are only partially
supported; email is an option but is not supported by Terraform. See the link above for details.)
endpoint:
The endpoint to send data to; the contents will vary with the protocol (see the link above for more information).
endpoint\_auto\_confirms:
Boolean indicating whether the endpoint is capable of auto-confirming the subscription, e.g. PagerDuty. Default is
false.
raw\_message\_delivery:
Boolean indicating whether to enable raw message delivery (the original message is passed directly, not
wrapped in JSON with the original message in the message property). Default is false. |
map(object({
protocol = string
endpoint = string
endpoint_auto_confirms = bool
raw_message_delivery = bool
}))
| `{}` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [delegated\_administrator\_account\_id](#output\_delegated\_administrator\_account\_id) | The AWS Account ID of the AWS Organization delegated administrator account | +| [guardduty\_detector\_arn](#output\_guardduty\_detector\_arn) | The ARN of the GuardDuty detector created by the component | +| [guardduty\_detector\_id](#output\_guardduty\_detector\_id) | The ID of the GuardDuty detector created by the component | +| [sns\_topic\_name](#output\_sns\_topic\_name) | The name of the SNS topic created by the component | +| [sns\_topic\_subscriptions](#output\_sns\_topic\_subscriptions) | The SNS topic subscriptions created by the component | + + + +## References + +- [AWS GuardDuty Documentation](https://aws.amazon.com/guardduty/) +- [Cloud Posse's upstream component](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/guardduty/common/) + +[](https://cpco.io/component) diff --git a/modules/guardduty/context.tf b/modules/guardduty/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/guardduty/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
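+#
+# As a minimal illustration of the pattern described above (the module label and
+# name below are hypothetical, not part of this component):
+#
+#   module "example_label" {
+#     source  = "cloudposse/label/null"
+#     version = "0.25.0"
+#
+#     # `name` here overrides the null default coming from the context;
+#     # all other ID elements are inherited from `module.this.context`.
+#     name    = "example"
+#     context = module.this.context
+#   }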
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/guardduty/main.tf b/modules/guardduty/main.tf new file mode 100644 index 000000000..633e5d59c --- /dev/null +++ b/modules/guardduty/main.tf @@ -0,0 +1,98 @@ +locals { + enabled = module.this.enabled + account_map = module.account_map.outputs.full_account_map + + current_account_id = one(data.aws_caller_identity.this[*].account_id) + member_account_id_list = [for a in keys(local.account_map) : (local.account_map[a]) if local.account_map[a] != local.current_account_id] + org_delegated_administrator_account_id = local.account_map[var.delegated_administrator_account_name] + org_management_account_id = var.organization_management_account_name == null ? local.account_map[module.account_map.outputs.root_account_account_name] : local.account_map[var.organization_management_account_name] + is_org_delegated_administrator_account = local.current_account_id == local.org_delegated_administrator_account_id + is_org_management_account = local.current_account_id == local.org_management_account_id + + create_sns_topic = local.enabled && var.create_sns_topic + create_guardduty_collector = local.enabled && ((local.is_org_delegated_administrator_account && !var.admin_delegated) || local.is_org_management_account) + create_org_delegation = local.enabled && local.is_org_management_account + create_org_configuration = local.enabled && local.is_org_delegated_administrator_account && var.admin_delegated +} + +data "aws_caller_identity" "this" { + count = local.enabled ? 1 : 0 +} + +# If we are are in the AWS Org management account, delegate GuardDuty to the org administrator account +# (usually the security account) +resource "aws_guardduty_organization_admin_account" "this" { + count = local.create_org_delegation ? 
1 : 0 + + admin_account_id = local.org_delegated_administrator_account_id +} + +# If we are are in the AWS Org designated administrator account, enable the GuardDuty detector and optionally create an +# SNS topic for notifications and CloudWatch event rules for findings +module "guardduty" { + count = local.create_guardduty_collector ? 1 : 0 + source = "cloudposse/guardduty/aws" + version = "0.5.0" + + finding_publishing_frequency = var.finding_publishing_frequency + create_sns_topic = local.create_sns_topic + findings_notification_arn = var.findings_notification_arn + subscribers = var.subscribers + enable_cloudwatch = var.cloudwatch_enabled + cloudwatch_event_rule_pattern_detail_type = var.cloudwatch_event_rule_pattern_detail_type + s3_protection_enabled = var.s3_protection_enabled + + context = module.this.context +} + +# If we are are in the AWS Org designated administrator account, set the AWS Org-wide GuardDuty configuration by +# configuring all other accounts to send their GuardDuty findings to the detector in this account. +# +# This also configures the various Data Sources. +resource "awsutils_guardduty_organization_settings" "this" { + count = local.create_org_configuration ? 1 : 0 + + member_accounts = local.member_account_id_list + detector_id = module.guardduty_delegated_detector[0].outputs.guardduty_detector_id +} + +resource "aws_guardduty_organization_configuration" "this" { + count = local.create_org_configuration ? 1 : 0 + + auto_enable_organization_members = var.auto_enable_organization_members + detector_id = module.guardduty_delegated_detector[0].outputs.guardduty_detector_id + + datasources { + s3_logs { + auto_enable = var.s3_protection_enabled + } + kubernetes { + audit_logs { + enable = var.kubernetes_audit_logs_enabled + } + } + malware_protection { + scan_ec2_instance_with_findings { + ebs_volumes { + auto_enable = var.malware_protection_scan_ec2_ebs_volumes_enabled + } + } + } + } +} + +resource "aws_guardduty_detector_feature" "this" { + for_each = { for k, v in var.detector_features : k => v if local.create_org_configuration } + + detector_id = module.guardduty_delegated_detector[0].outputs.guardduty_detector_id + name = each.value.feature_name + status = each.value.status + + dynamic "additional_configuration" { + for_each = each.value.additional_configuration != null ? [each.value.additional_configuration] : [] + content { + name = additional_configuration.value.addon_name + status = additional_configuration.value.status + } + } +} diff --git a/modules/guardduty/outputs.tf b/modules/guardduty/outputs.tf new file mode 100644 index 000000000..bfffac9a0 --- /dev/null +++ b/modules/guardduty/outputs.tf @@ -0,0 +1,24 @@ +output "delegated_administrator_account_id" { + value = local.org_delegated_administrator_account_id + description = "The AWS Account ID of the AWS Organization delegated administrator account" +} + +output "guardduty_detector_arn" { + value = local.create_guardduty_collector ? try(module.guardduty[0].guardduty_detector.arn, null) : null + description = "The ARN of the GuardDuty detector created by the component" +} + +output "guardduty_detector_id" { + value = local.create_guardduty_collector ? try(module.guardduty[0].guardduty_detector.id, null) : null + description = "The ID of the GuardDuty detector created by the component" +} + +output "sns_topic_name" { + value = local.create_guardduty_collector ? 
try(module.guardduty[0].sns_topic.name, null) : null + description = "The name of the SNS topic created by the component" +} + +output "sns_topic_subscriptions" { + value = local.create_guardduty_collector ? try(module.guardduty[0].sns_topic_subscriptions, null) : null + description = "The SNS topic subscriptions created by the component" +} diff --git a/modules/guardduty/providers.tf b/modules/guardduty/providers.tf new file mode 100644 index 000000000..e4a566a8a --- /dev/null +++ b/modules/guardduty/providers.tf @@ -0,0 +1,30 @@ +provider "aws" { + region = var.region + + profile = !var.privileged && module.iam_roles.profiles_enabled ? module.iam_roles.terraform_profile_name : null + dynamic "assume_role" { + for_each = var.privileged || module.iam_roles.profiles_enabled || (module.iam_roles.terraform_role_arn == null) ? [] : ["role"] + content { + role_arn = module.iam_roles.terraform_role_arn + } + } +} + +provider "awsutils" { + region = var.region + + profile = !var.privileged && module.iam_roles.profiles_enabled ? module.iam_roles.terraform_profile_name : null + dynamic "assume_role" { + for_each = var.privileged || module.iam_roles.profiles_enabled || (module.iam_roles.terraform_role_arn == null) ? [] : ["role"] + content { + role_arn = module.iam_roles.terraform_role_arn + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + privileged = var.privileged + + context = module.this.context +} diff --git a/modules/guardduty/remote-state.tf b/modules/guardduty/remote-state.tf new file mode 100644 index 000000000..a79b03d04 --- /dev/null +++ b/modules/guardduty/remote-state.tf @@ -0,0 +1,27 @@ +module "account_map" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "account-map" + tenant = var.account_map_tenant != "" ? var.account_map_tenant : module.this.tenant + stage = var.root_account_stage + environment = var.global_environment + privileged = var.privileged + + context = module.this.context +} + +module "guardduty_delegated_detector" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + # If we are creating the delegated detector (because we are in the delegated admin account), then don't try to lookup + # the delegated detector ID from remote state + count = local.create_guardduty_collector ? 0 : 1 + + component = "${var.delegated_admininstrator_component_name}/${module.this.environment}" + stage = replace(var.delegated_administrator_account_name, "${module.this.tenant}-", "") + privileged = var.privileged + + context = module.this.context +} diff --git a/modules/guardduty/variables.tf b/modules/guardduty/variables.tf new file mode 100644 index 000000000..ba11f96dd --- /dev/null +++ b/modules/guardduty/variables.tf @@ -0,0 +1,217 @@ +variable "account_map_tenant" { + type = string + default = "core" + description = "The tenant where the `account_map` component required by remote-state is deployed" +} + +variable "admin_delegated" { + type = bool + default = false + description = < ## Requirements @@ -86,7 +95,7 @@ No resources. | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | -| [assume\_role\_actions](#input\_assume\_role\_actions) | The IAM action to be granted by the AssumeRole policy | `list(string)` |
[
"sts:AssumeRole",
"sts:TagSession"
]
| no | +| [assume\_role\_actions](#input\_assume\_role\_actions) | The IAM action to be granted by the AssumeRole policy | `list(string)` |
[
"sts:AssumeRole",
"sts:SetSourceIdentity",
"sts:TagSession"
]
| no | | [assume\_role\_conditions](#input\_assume\_role\_conditions) | List of conditions for the assume role policy |
list(object({
test = string
variable = string
values = list(string)
}))
| `[]` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | @@ -95,8 +104,6 @@ No resources. | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [instance\_profile\_enabled](#input\_instance\_profile\_enabled) | Create EC2 Instance Profile for the role | `bool` | `false` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | @@ -128,9 +135,11 @@ No resources. |------|-------------| | [role](#output\_role) | IAM role module outputs | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/iam-role) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/iam-role) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/iam-role/providers.tf b/modules/iam-role/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/iam-role/providers.tf +++ b/modules/iam-role/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/iam-role/variables.tf b/modules/iam-role/variables.tf index 0ef3b1f9c..3d80279eb 100644 --- a/modules/iam-role/variables.tf +++ b/modules/iam-role/variables.tf @@ -67,7 +67,7 @@ variable "policy_description" { variable "assume_role_actions" { type = list(string) - default = ["sts:AssumeRole", "sts:TagSession"] + default = ["sts:AssumeRole", "sts:SetSourceIdentity", "sts:TagSession"] description = "The IAM action to be granted by the AssumeRole policy" } diff --git a/modules/iam-service-linked-roles/README.md b/modules/iam-service-linked-roles/README.md index d51670da7..5d7f38029 100644 --- a/modules/iam-service-linked-roles/README.md +++ b/modules/iam-service-linked-roles/README.md @@ -1,6 +1,14 @@ +--- +tags: + - component/iam-service-linked-roles + - layer/eks + - provider/aws +--- + # Component: `iam-service-linked-roles` -This component is responsible for provisioning [IAM Service-Linked Roles](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). +This component is responsible for provisioning +[IAM Service-Linked Roles](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). 
## Usage @@ -15,7 +23,6 @@ components: workspace_enabled: true vars: enabled: true - root_account_tenant_name: core service_linked_roles: spot_amazonaws_com: aws_service_name: "spot.amazonaws.com" @@ -27,16 +34,15 @@ components: ## Service-Linked Roles for EC2 Spot and EC2 Spot Fleet -__Note:__ If you want to use EC2 Spot or Spot Fleet, -you will need to provision the following Service-Linked Roles: +**Note:** If you want to use EC2 Spot or Spot Fleet, you will need to provision the following Service-Linked Roles: - Service-Linked Role for EC2 Spot - Service-Linked Role for EC2 Spot Fleet This is only necessary if this is the first time you're using EC2 Spot and Spot Fleet in the account. -Note that if the Service-Linked Roles already exist in the AWS account (if you used EC2 Spot or Spot Fleet before), -and you try to provision them again, you will see the following errors: +Note that if the Service-Linked Roles already exist in the AWS account (if you used EC2 Spot or Spot Fleet before), and +you try to provision them again, you will see the following errors: ```text An error occurred (InvalidInput) when calling the CreateServiceLinkedRole operation: @@ -47,10 +53,11 @@ Service role name AWSServiceRoleForEC2SpotFleet has been taken in this account, ``` For more details, see: + - https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html - https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html - + ## Requirements @@ -90,8 +97,6 @@ For more details, see: | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -111,8 +116,11 @@ For more details, see: |------|-------------| | [service\_linked\_roles](#output\_service\_linked\_roles) | Provisioned Service-Linked roles | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/iam-service-linked-roles) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/iam-service-linked-roles) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/iam-service-linked-roles/providers.tf b/modules/iam-service-linked-roles/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/iam-service-linked-roles/providers.tf +++ b/modules/iam-service-linked-roles/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/ipam/CHANGELOG.md b/modules/ipam/CHANGELOG.md new file mode 100644 index 000000000..5b080efc8 --- /dev/null +++ b/modules/ipam/CHANGELOG.md @@ -0,0 +1 @@ +## Components PR [Fix components](https://github.com/cloudposse/terraform-aws-components/pull/855) diff --git a/modules/ipam/README.md b/modules/ipam/README.md new file mode 100644 index 000000000..b185706df --- /dev/null +++ b/modules/ipam/README.md @@ -0,0 +1,137 @@ +--- +tags: + - component/ipam + - layer/unassigned + - provider/aws +--- + +# Component: `ipam` + +This component is responsible for provisioning IPAM per region in a centralized account. + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. 
+ +```yaml +components: + terraform: + ipam: + vars: + enabled: true + top_cidr: [10.96.0.0/11] + pool_configurations: + core: + cidr: [10.96.0.0/12] + locale: us-east-2 + sub_pools: + network: + cidr: [10.96.0.0/16] + ram_share_accounts: [core-network] + auto: + cidr: [10.97.0.0/16] + ram_share_accounts: [core-auto] + corp: + cidr: [10.98.0.0/16] + ram_share_accounts: [core-corp] + plat: + cidr: [10.112.0.0/12] + locale: us-east-2 + sub_pools: + dev: + cidr: [10.112.0.0/16] + ram_share_accounts: [plat-dev] + staging: + cidr: [10.113.0.0/16] + ram_share_accounts: [plat-staging] + prod: + cidr: [10.114.0.0/16] + ram_share_accounts: [plat-prod] + sandbox: + cidr: [10.115.0.0/16] + ram_share_accounts: [plat-sandbox] +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [ipam](#module\_ipam) | aws-ia/ipam/aws | 1.2.1 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | +| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [account\_map\_environment\_name](#input\_account\_map\_environment\_name) | The name of the environment where `account_map` is provisioned | `string` | `"gbl"` | no | +| [account\_map\_stage\_name](#input\_account\_map\_stage\_name) | The name of the stage where `account_map` is provisioned | `string` | `"root"` | no | +| [account\_map\_tenant\_name](#input\_account\_map\_tenant\_name) | The name of the tenant where `account_map` is provisioned.

If the `tenant` label is not used, leave this as `null`. | `string` | `null` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [address\_family](#input\_address\_family) | IPv4/6 address family. | `string` | `"ipv4"` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [ipam\_scope\_id](#input\_ipam\_scope\_id) | (Optional) Required if `var.ipam_id` is set. Determines which scope to deploy pools into. | `string` | `null` | no | +| [ipam\_scope\_type](#input\_ipam\_scope\_type) | Which scope type to use. Valid inputs include `public` or `private`. You can alternatively provide your own scope ID. | `string` | `"private"` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [pool\_configurations](#input\_pool\_configurations) | A multi-level, nested map describing nested IPAM pools. Can nest up to three levels with the top level being outside the `pool_configurations`. This attribute is quite complex, see README.md for further explanation. | `any` | `{}` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [top\_auto\_import](#input\_top\_auto\_import) | `auto_import` setting for top-level pool. | `bool` | `null` | no | +| [top\_cidr](#input\_top\_cidr) | Top-level CIDR blocks. | `list(string)` | n/a | yes | +| [top\_cidr\_authorization\_context](#input\_top\_cidr\_authorization\_context) | A signed document that proves that you are authorized to bring the specified IP address range to Amazon using BYOIP. Document is not stored in the state file. For more information, refer to https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_ipam_pool_cidr#cidr_authorization_context. | `any` | `null` | no | +| [top\_description](#input\_top\_description) | Description of top-level pool. | `string` | `""` | no | +| [top\_ram\_share\_principals](#input\_top\_ram\_share\_principals) | Principals to create RAM shares for top-level pool. | `list(string)` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [pool\_configurations](#output\_pool\_configurations) | Pool configurations | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/TODO) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/ipam/context.tf b/modules/ipam/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/ipam/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/ipam/main.tf b/modules/ipam/main.tf new file mode 100644 index 000000000..109d6c042 --- /dev/null +++ b/modules/ipam/main.tf @@ -0,0 +1,58 @@ +locals { + enabled = module.this.enabled + + pool_configurations = { + for pool, poolval in var.pool_configurations : + pool => merge(poolval, { + locale = lookup(poolval, "locale", join("", data.aws_region.current.*.name)) + sub_pools = { + for subpool, subval in poolval.sub_pools : + subpool => merge(subval, { + ram_share_principals = concat( + lookup(subval, "ram_share_principals", []), + tolist( + setsubtract( + [ + for account in lookup(subval, "ram_share_accounts", []) : + module.account_map.outputs.full_account_map[account] + ], + [join("", data.aws_caller_identity.current.*.account_id)] + ) + ) + ) + allocation_resource_tags = merge( + lookup(subval, "allocation_resource_tags", {}), + module.this.tags + ) + }) + } + }) + } +} + +data "aws_caller_identity" "current" { + count = local.enabled ? 1 : 0 +} + +data "aws_region" "current" { + count = local.enabled ? 1 : 0 +} + +module "ipam" { + source = "aws-ia/ipam/aws" + version = "1.2.1" + + count = local.enabled ? 
1 : 0 + + create_ipam = local.enabled + + address_family = var.address_family + ipam_scope_id = var.ipam_scope_id + ipam_scope_type = var.ipam_scope_type + pool_configurations = local.pool_configurations + top_auto_import = var.top_auto_import + top_cidr = var.top_cidr + top_cidr_authorization_context = var.top_cidr_authorization_context + top_description = var.top_description + top_ram_share_principals = var.top_ram_share_principals +} diff --git a/modules/ipam/outputs.tf b/modules/ipam/outputs.tf new file mode 100644 index 000000000..054d636b7 --- /dev/null +++ b/modules/ipam/outputs.tf @@ -0,0 +1,4 @@ +output "pool_configurations" { + value = local.pool_configurations + description = "Pool configurations" +} diff --git a/modules/ipam/providers.tf b/modules/ipam/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/ipam/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/ipam/remote-state.tf b/modules/ipam/remote-state.tf new file mode 100644 index 000000000..69f657564 --- /dev/null +++ b/modules/ipam/remote-state.tf @@ -0,0 +1,11 @@ +module "account_map" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "account-map" + environment = var.account_map_environment_name + stage = var.account_map_stage_name + tenant = var.account_map_tenant_name + + context = module.this.context +} diff --git a/modules/ipam/variables.tf b/modules/ipam/variables.tf new file mode 100644 index 000000000..7cf2847cc --- /dev/null +++ b/modules/ipam/variables.tf @@ -0,0 +1,118 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "account_map_environment_name" { + type = string + description = "The name of the environment where `account_map` is provisioned" + default = "gbl" +} + +variable "account_map_stage_name" { + type = string + description = "The name of the stage where `account_map` is provisioned" + default = "root" +} + +variable "account_map_tenant_name" { + type = string + description = <<-EOT + The name of the tenant where `account_map` is provisioned. + + If the `tenant` label is not used, leave this as `null`. + EOT + default = null +} + +# Copied from upstream module's variables.tf + +variable "pool_configurations" { + description = "A multi-level, nested map describing nested IPAM pools. Can nest up to three levels with the top level being outside the `pool_configurations`. This attribute is quite complex, see README.md for further explanation." + type = any + + # Below is an example of the actual expected structure for `pool_configurations`. 
type = any is currently being used, may adjust in the future + + # type = object({ + # cidr = optional(list(string)) + # ram_share_principals = optional(list(string)) + # locale = optional(string) + # allocation_default_netmask_length = optional(string) + # allocation_max_netmask_length = optional(string) + # allocation_min_netmask_length = optional(string) + # auto_import = optional(string) + # aws_service = optional(string) + # description = optional(string) + # name = optional(string) + # publicly_advertisable = optional(bool) + # allocation_resource_tags = optional(map(string)) + # tags = optional(map(string)) + # cidr_authorization_context = optional(map(string)) + + # sub_pools = (repeat of pool_configuration object above ) + # }) + default = {} + + # Validate no more than 3 layers of sub_pools specified + # TODO: fix validation, fails if less than 2 layers of pools + # validation { + # error_message = "Sub pools (sub_pools) is defined in the 3rd level of a nested pool. Sub pools can only be defined up to 3 levels." + # condition = flatten([for k, v in var.pool_configurations : [for k2, v2 in v.sub_pools : [for k3, v3 in try(v2.sub_pools, []) : "${k}/${k2}/${k3}" if try(v3.sub_pools, []) != []]]]) == [] + # } +} + +variable "top_cidr" { + description = "Top-level CIDR blocks." + type = list(string) +} + +variable "top_ram_share_principals" { + description = "Principals to create RAM shares for top-level pool." + type = list(string) + default = null +} + +variable "top_auto_import" { + description = "`auto_import` setting for top-level pool." + type = bool + default = null +} + +variable "top_description" { + description = "Description of top-level pool." + type = string + default = "" +} + +variable "top_cidr_authorization_context" { + description = "A signed document that proves that you are authorized to bring the specified IP address range to Amazon using BYOIP. Document is not stored in the state file. For more information, refer to https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_ipam_pool_cidr#cidr_authorization_context." + type = any + default = null +} + +variable "address_family" { + description = "IPv4/6 address family." + type = string + default = "ipv4" + validation { + condition = var.address_family == "ipv4" || var.address_family == "ipv6" + error_message = "Only valid options: \"ipv4\", \"ipv6\"." + } +} + +variable "ipam_scope_id" { + description = "(Optional) Required if `var.ipam_id` is set. Determines which scope to deploy pools into." + type = string + default = null +} + +variable "ipam_scope_type" { + description = "Which scope type to use. Valid inputs include `public` or `private`. You can alternatively provide your own scope ID." + type = string + default = "private" + + validation { + condition = var.ipam_scope_type == "public" || var.ipam_scope_type == "private" + error_message = "Scope type must be either public or private." 
+ } +} diff --git a/modules/ipam/versions.tf b/modules/ipam/versions.tf new file mode 100644 index 000000000..f33ede77f --- /dev/null +++ b/modules/ipam/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + } +} diff --git a/modules/kinesis-stream/README.md b/modules/kinesis-stream/README.md index 0c8303c84..98d5758b4 100644 --- a/modules/kinesis-stream/README.md +++ b/modules/kinesis-stream/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/kinesis-stream + - layer/addons + - provider/aws +--- + # Component: `kinesis-stream` This component is responsible for provisioning an Amazon Kinesis data stream. @@ -24,16 +31,15 @@ components: tags: Team: sre Service: kinesis-stream - ``` ```yaml import: -- catalog/kinesis-stream/defaults + - catalog/kinesis-stream/defaults components: terraform: - kinesis-example: + kinesis-example: metadata: component: kinesis-stream inherits: @@ -45,13 +51,14 @@ components: kms_key_id: "alias/aws/kinesis" ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | ## Providers @@ -84,8 +91,6 @@ No resources. | [enforce\_consumer\_deletion](#input\_enforce\_consumer\_deletion) | Forcefully delete stream consumers before destroying the stream. | `bool` | `true` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kms\_key\_id](#input\_kms\_key\_id) | The name of the KMS key to use for encryption. | `string` | `"alias/aws/kinesis"` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | @@ -111,8 +116,11 @@ No resources. | [shard\_count](#output\_shard\_count) | Number of shards provisioned. | | [stream\_arn](#output\_stream\_arn) | ARN of the the Kinesis stream. | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/kinesis-stream) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/kinesis-stream) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/kinesis-stream/default.auto.tfvars b/modules/kinesis-stream/default.auto.tfvars deleted file mode 100644 index 47f94fb9b..000000000 --- a/modules/kinesis-stream/default.auto.tfvars +++ /dev/null @@ -1 +0,0 @@ -enabled = false diff --git a/modules/kinesis-stream/providers.tf b/modules/kinesis-stream/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/kinesis-stream/providers.tf +++ b/modules/kinesis-stream/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/kinesis-stream/versions.tf b/modules/kinesis-stream/versions.tf index e89eb16ed..f33ede77f 100644 --- a/modules/kinesis-stream/versions.tf +++ b/modules/kinesis-stream/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } } } diff --git a/modules/kms/README.md b/modules/kms/README.md index e5d53d1ba..0b51e2ddc 100644 --- a/modules/kms/README.md +++ b/modules/kms/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/kms + - layer/addons + - provider/aws +--- + # Component: `kms` This component is responsible for provisioning a KMS Key. 
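
For reference, components in this repository read each other's outputs through the `cloudposse/stack-config/yaml//modules/remote-state` module (the same pattern used by `modules/ipam/remote-state.tf` and `modules/lambda/remote-state.tf` in this changeset). Below is a minimal, non-authoritative sketch of how a downstream component might consume this component's `kms_key` output that way; the stack component name (`kms`), the presence of the standard `context.tf` in the caller, and the internal shape of the `kms_key` output are assumptions for illustration only.

```hcl
# Hypothetical downstream usage: read the kms component's outputs via remote state.
module "kms" {
  source  = "cloudposse/stack-config/yaml//modules/remote-state"
  version = "1.5.0"

  # Assumed stack component name; adjust to match your stack configuration.
  component = "kms"

  context = module.this.context
}

locals {
  # `outputs.kms_key` mirrors the `kms_key` output documented below;
  # its attributes come from the underlying cloudposse/kms-key/aws module.
  kms_key = module.kms.outputs.kms_key
}
```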
@@ -19,7 +26,6 @@ components: enabled: true ``` - ## Requirements | Name | Version | @@ -35,12 +41,13 @@ components: ## Modules -| Name | Source | Version | -|------|--------|---------| -| [allowed\_role\_map](#module\_allowed\_role\_map) | ../account-map/modules/roles-to-principals | n/a | -| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [kms\_key](#module\_kms\_key) | cloudposse/kms-key/aws | 0.12.1 | -| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| Name | Source | Version | +| -------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------- | ------- | +| [iam_roles](#module_iam_roles) | git::ssh://git@github.com/spenmo/infrastructure.git//components/terraform/account-map/modules/iam-roles | n/a | +| [introspection](#module_introspection) | cloudposse/label/null | 0.25.0 | +| [kms_key](#module_kms_key) | cloudposse/kms-key/aws | 0.12.1 | +| [monorepo](#module_monorepo) | git::ssh://git@github.com/spenmo/infrastructure.git | n/a | +| [this](#module_this) | cloudposse/label/null | 0.25.0 | ## Resources @@ -90,11 +97,13 @@ components: | Name | Description | |------|-------------| | [kms\_key](#output\_kms\_key) | Output for KMS module | - + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/kms) - Cloud Posse's upstream component -* [cloudposse/terraform-aws-kms-key](https://github.com/cloudposse/terraform-aws-kms-key) - Cloud Posse's upstream module +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/kms) - + Cloud Posse's upstream component +- [cloudposse/terraform-aws-kms-key](https://github.com/cloudposse/terraform-aws-kms-key) - Cloud Posse's upstream + module [](https://cpco.io/component) diff --git a/modules/kms/providers.tf b/modules/kms/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/kms/providers.tf +++ b/modules/kms/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/kms/versions.tf b/modules/kms/versions.tf index e89eb16ed..f33ede77f 100644 --- a/modules/kms/versions.tf +++ b/modules/kms/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } } } diff --git a/modules/lakeformation/README.md b/modules/lakeformation/README.md index 236959e64..83807cb20 100644 --- a/modules/lakeformation/README.md +++ b/modules/lakeformation/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/lakeformation + - layer/unassigned + - provider/aws +--- + # Component: `lakeformation` This component is responsible for provisioning Amazon Lake Formation resources. @@ -8,7 +15,8 @@ This component is responsible for provisioning Amazon Lake Formation resources. Here are some example snippets for how to use this component: -`stacks/catalog/lakeformation/defaults.yaml` file (base component for all lakeformation deployments with default settings): +`stacks/catalog/lakeformation/defaults.yaml` file (base component for all lakeformation deployments with default +settings): ```yaml components: @@ -28,11 +36,11 @@ components: ```yaml import: -- catalog/lakeformation/defaults + - catalog/lakeformation/defaults components: terraform: - lakeformation-example: + lakeformation-example: metadata: component: lakeformation inherits: @@ -54,6 +62,7 @@ components: left: test1 ``` + ## Requirements @@ -99,8 +108,6 @@ components: | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -125,8 +132,11 @@ components: |------|-------------| | [lf\_tags](#output\_lf\_tags) | List of LF tags created. | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/lakeformation) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/lakeformation) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/lakeformation/default.auto.tfvars b/modules/lakeformation/default.auto.tfvars deleted file mode 100644 index 47f94fb9b..000000000 --- a/modules/lakeformation/default.auto.tfvars +++ /dev/null @@ -1 +0,0 @@ -enabled = false diff --git a/modules/lakeformation/providers.tf b/modules/lakeformation/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/lakeformation/providers.tf +++ b/modules/lakeformation/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/lambda/README.md b/modules/lambda/README.md new file mode 100644 index 000000000..60cce5310 --- /dev/null +++ b/modules/lambda/README.md @@ -0,0 +1,255 @@ +--- +tags: + - component/lambda + - layer/software-delivery + - provider/aws +--- + +# Component: `lambda` + +This component is responsible for provisioning Lambda functions. + +## Usage + +**Stack Level**: Regional + +Stack configuration for defaults: + +```yaml +components: + terraform: + lambda-defaults: + metadata: + type: abstract + settings: + spacelift: + workspace_enabled: true + vars: + enabled: true +``` + +Sample App Yaml Entry: + +```yaml +import: + - catalog/lambda/defaults + +components: + terraform: + lambda/hello-world-py: + metadata: + component: lambda + inherits: + - lambda/defaults + vars: + name: hello-world-py + function_name: main + description: Hello Lambda from Python! 
+ handler: lambda.lambda_handler # in go this is the compiled binary, python it's filename.function + memory_size: 256 + # https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html + runtime: python3.9 + package_type: Zip # `Zip` or `Image` + policy_json: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ListAllBuckets", + "Effect": "Allow", + "Action": "s3:ListAllMyBuckets", + "Resource": "*" + } + ] + } + iam_policy: + statements: + - sid: AllowSQSWorkerWriteAccess + effect: Allow + actions: + - sqs:SendMessage + - sqs:SendMessageBatch + resources: + - arn:aws:sqs:*:111111111111:worker-queue + # Filename example + filename: lambdas/hello-world-python/output.zip # generated by zip variable. + zip: + enabled: true + input_dir: hello-world-python + output: hello-world-python/output.zip + + # S3 Source Example + # s3_bucket_name: lambda-source # lambda main.tf calculates the rest of the bucket_name + # s3_key: hello-world-go.zip +``` + +### Notifications: + +#### SQS + +```yaml +sqs_notifications: + my-service-a: + sqs_component: + component: sqs-queue/my-service-a + my-service-b: + sqs_arn: arn:aws:sqs:us-west-2:111111111111:my-service-b +``` + +#### S3 + +```yaml +s3_notifications: + my-service-a: + bucket_component: + component: s3-bucket/my-service-a + events: ["s3:ObjectCreated:*"] + my-service-b: + bucket_name: my-service-b + events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"] +``` + +#### Cron (CloudWatch Event) + +```yaml +cloudwatch_event_rules: + schedule-a: + schedule_expression: "rate(5 minutes)" + schedule-b: + schedule_expression: "cron(0 20 * * ? *)" +``` + + + +## Requirements + +| Name | Version | +| ------------------------------------------------------------------------ | -------- | +| [terraform](#requirement_terraform) | >= 1.3.0 | +| [archive](#requirement_archive) | >= 2.3.0 | +| [aws](#requirement_aws) | >= 4.9.0 | +| [random](#requirement_random) | >= 3.0.0 | + +## Providers + +| Name | Version | +| ------------------------------------------------------------ | -------- | +| [archive](#provider_archive) | >= 2.3.0 | +| [aws](#provider_aws) | >= 4.9.0 | +| [random](#provider_random) | >= 3.0.0 | + +## Modules + +| Name | Source | Version | +| -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------- | ------- | +| [cloudwatch_event_rules_label](#module_cloudwatch_event_rules_label) | cloudposse/label/null | 0.25.0 | +| [iam_policy](#module_iam_policy) | cloudposse/iam-policy/aws | 1.0.1 | +| [iam_roles](#module_iam_roles) | ../account-map/modules/iam-roles | n/a | +| [lambda](#module_lambda) | cloudposse/lambda-function/aws | 0.6.1 | +| [s3_bucket](#module_s3_bucket) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [s3_bucket_notifications_component](#module_s3_bucket_notifications_component) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [sqs_iam_policy](#module_sqs_iam_policy) | cloudposse/iam-policy/aws | 1.0.1 | +| [sqs_queue](#module_sqs_queue) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [this](#module_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | +| 
[aws_cloudwatch_event_rule.event_rules](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource | +| [aws_cloudwatch_event_target.sns](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource | +| [aws_iam_role_policy_attachment.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.sqs_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_lambda_event_source_mapping.event_source_mapping](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_event_source_mapping) | resource | +| [aws_lambda_function_url.lambda_url](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function_url) | resource | +| [aws_lambda_permission.allow_cloudwatch_to_call_lambda](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_permission) | resource | +| [aws_lambda_permission.s3_notification](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_permission) | resource | +| [aws_s3_bucket_notification.s3_notifications](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_notification) | resource | +| [random_pet.zip_recreator](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/pet) | resource | +| [archive_file.lambdazip](https://registry.terraform.io/providers/hashicorp/archive/latest/docs/data-sources/file) | data source | +| [aws_ssm_parameter.cicd_ssm_param](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +| --------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------: | +| [additional_tag_map](#input_additional_tag_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [architectures](#input_architectures) | Instruction set architecture for your Lambda function. Valid values are ["x86\_64"] and ["arm64"].
Default is ["x86\_64"]. Removing this attribute, function's architecture stay the same. | `list(string)` | `null` | no | +| [attributes](#input_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [cicd_s3_key_format](#input_cicd_s3_key_format) | The format of the S3 key to store the latest version/sha of the Lambda function. This is used with cicd_ssm_param_name. Defaults to 'stage/{stage}/lambda/{function_name}/%s.zip' | `string` | `null` | no | +| [cicd_ssm_param_name](#input_cicd_ssm_param_name) | The name of the SSM parameter to store the latest version/sha of the Lambda function. This is used with cicd_s3_key_format | `string` | `null` | no | +| [cloudwatch_event_rules](#input_cloudwatch_event_rules) | Creates EventBridge (CloudWatch Events) rules for invoking the Lambda Function along with the required permissions. |
map(object({
description = optional(string)
event_bus_name = optional(string)
event_pattern = optional(string)
is_enabled = optional(bool)
name_prefix = optional(string)
role_arn = optional(string)
schedule_expression = optional(string)
}))
| `{}` | no | +| [cloudwatch_lambda_insights_enabled](#input_cloudwatch_lambda_insights_enabled) | Enable CloudWatch Lambda Insights for the Lambda Function. | `bool` | `false` | no | +| [cloudwatch_logs_kms_key_arn](#input_cloudwatch_logs_kms_key_arn) | The ARN of the KMS Key to use when encrypting log data. | `string` | `null` | no | +| [cloudwatch_logs_retention_in_days](#input_cloudwatch_logs_retention_in_days) | Specifies the number of days you want to retain log events in the specified log group. Possible values are:
1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653, and 0. If you select 0, the events in the
log group are always retained and never expire. | `number` | `null` | no | +| [context](#input_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional_tag_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [custom_iam_policy_arns](#input_custom_iam_policy_arns) | ARNs of IAM policies to be attached to the Lambda role | `set(string)` | `[]` | no | +| [dead_letter_config_target_arn](#input_dead_letter_config_target_arn) | ARN of an SNS topic or SQS queue to notify when an invocation fails. If this option is used, the function's IAM role
must be granted suitable access to write to the target object, which means allowing either the sns:Publish or
sqs:SendMessage action on this ARN, depending on which service is targeted. | `string` | `null` | no | +| [delimiter](#input_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [description](#input_description) | Description of what the Lambda Function does. | `string` | `null` | no | +| [descriptor_formats](#input_descriptor_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [filename](#input_filename) | The path to the function's deployment package within the local filesystem. Works well with the `zip` variable. If defined, The s3\_-prefixed options and image_uri cannot be used. | `string` | `null` | no | +| [function_name](#input_function_name) | Unique name for the Lambda Function. | `string` | `null` | no | +| [function_url_enabled](#input_function_url_enabled) | Create a aws_lambda_function_url resource to expose the Lambda function | `bool` | `false` | no | +| [handler](#input_handler) | The function entrypoint in your code. | `string` | `null` | no | +| [iam_policy](#input_iam_policy) | IAM policy to attach to the Lambda role, specified as a Terraform object. This can be used with or instead of `var.policy_json`. |
object({
policy_id = optional(string, null)
version = optional(string, null)
statements = list(object({
sid = optional(string, null)
effect = optional(string, null)
actions = optional(list(string), null)
not_actions = optional(list(string), null)
resources = optional(list(string), null)
not_resources = optional(list(string), null)
conditions = optional(list(object({
test = string
variable = string
values = list(string)
})), [])
principals = optional(list(object({
type = string
identifiers = list(string)
})), [])
not_principals = optional(list(object({
type = string
identifiers = list(string)
})), [])
}))
})
| `null` | no | +| [iam_policy_description](#input_iam_policy_description) | Description of the IAM policy for the Lambda IAM role | `string` | `"Minimum SSM read permissions for Lambda IAM Role"` | no | +| [id_length_limit](#input_id_length_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [image_config](#input_image_config) | The Lambda OCI [image configurations](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function#image_config)
block with three (optional) arguments:
- _entry_point_ - The ENTRYPOINT for the docker image (type `list(string)`).
- _command_ - The CMD for the docker image (type `list(string)`).
- _working_directory_ - The working directory for the docker image (type `string`). | `any` | `{}` | no | +| [image_uri](#input_image_uri) | The ECR image URI containing the function's deployment package. Conflicts with `filename`, `s3_bucket_name`, `s3_key`, and `s3_object_version`. | `string` | `null` | no | +| [kms_key_arn](#input_kms_key_arn) | Amazon Resource Name (ARN) of the AWS Key Management Service (KMS) key that is used to encrypt environment variables.
If this configuration is not provided when environment variables are in use, AWS Lambda uses a default service key.
If this configuration is provided when environment variables are not in use, the AWS Lambda API does not save this
configuration and Terraform will show a perpetual difference of adding the key. To fix the perpetual difference,
remove this configuration. | `string` | `""` | no | +| [label_key_case](#input_label_key_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label_order](#input_label_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label_value_case](#input_label_value_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels_as_tags](#input_labels_as_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [lambda_at_edge_enabled](#input_lambda_at_edge_enabled) | Enable Lambda@Edge for your Node.js or Python functions. The required trust relationship and publishing of function versions will be configured in this module. | `bool` | `false` | no | +| [lambda_environment](#input_lambda_environment) | Environment (e.g. ENV variables) configuration for the Lambda function enable you to dynamically pass settings to your function code and libraries. |
object({
variables = map(string)
})
| `null` | no | +| [layers](#input_layers) | List of Lambda Layer Version ARNs (maximum of 5) to attach to the Lambda Function. | `list(string)` | `[]` | no | +| [memory_size](#input_memory_size) | Amount of memory in MB the Lambda Function can use at runtime. | `number` | `128` | no | +| [name](#input_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [package_type](#input_package_type) | The Lambda deployment package type. Valid values are `Zip` and `Image`. | `string` | `"Zip"` | no | +| [permissions_boundary](#input_permissions_boundary) | ARN of the policy that is used to set the permissions boundary for the role | `string` | `""` | no | +| [policy_json](#input_policy_json) | IAM policy to attach to the Lambda role, specified as JSON. This can be used with or instead of `var.iam_policy`. | `string` | `null` | no | +| [publish](#input_publish) | Whether to publish creation/change as new Lambda Function Version. | `bool` | `false` | no | +| [regex_replace_chars](#input_regex_replace_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input_region) | AWS Region | `string` | n/a | yes | +| [reserved_concurrent_executions](#input_reserved_concurrent_executions) | The amount of reserved concurrent executions for this lambda function. A value of 0 disables lambda from being triggered and -1 removes any concurrency limitations. | `number` | `-1` | no | +| [runtime](#input_runtime) | The runtime environment for the Lambda function you are uploading. | `string` | `null` | no | +| [s3_bucket_component](#input_s3_bucket_component) | The bucket component to use for the S3 bucket containing the function's deployment package. Conflicts with `s3_bucket_name`, `filename` and `image_uri`. |
object({
component = string
tenant = optional(string)
stage = optional(string)
environment = optional(string)
})
| `null` | no | +| [s3_bucket_name](#input_s3_bucket_name) | The name suffix of the S3 bucket containing the function's deployment package. Conflicts with filename and image_uri.
This bucket must reside in the same AWS region where you are creating the Lambda function. | `string` | `null` | no | +| [s3_key](#input_s3_key) | The S3 key of an object containing the function's deployment package. Conflicts with filename and image_uri. | `string` | `null` | no | +| [s3_notifications](#input_s3_notifications) | A map of S3 bucket notifications to trigger the Lambda function |
map(object({
bucket_name = optional(string)
bucket_component = optional(object({
component = string
environment = optional(string)
tenant = optional(string)
stage = optional(string)
}))
events = optional(list(string))
filter_prefix = optional(string)
filter_suffix = optional(string)
source_account = optional(string)
}))
| `{}` | no | +| [s3_object_version](#input_s3_object_version) | The object version containing the function's deployment package. Conflicts with filename and image_uri. | `string` | `null` | no | +| [source_code_hash](#input_source_code_hash) | Used to trigger updates. Must be set to a base64-encoded SHA256 hash of the package file specified with either
filename or s3_key. The usual way to set this is filebase64sha256('file.zip') where 'file.zip' is the local filename
of the lambda function source archive. | `string` | `""` | no | +| [sqs_notifications](#input_sqs_notifications) | A map of SQS queue notifications to trigger the Lambda function |
map(object({
sqs_arn = optional(string)
sqs_component = optional(object({
component = string
environment = optional(string)
tenant = optional(string)
stage = optional(string)
}))
batch_size = optional(number)
source_account = optional(string)
on_failure_arn = optional(string)
maximum_concurrency = optional(number)
}))
| `{}` | no | +| [ssm_parameter_names](#input_ssm_parameter_names) | List of AWS Systems Manager Parameter Store parameter names. The IAM role of this Lambda function will be enhanced
with read permissions for those parameters. Parameters must start with a forward slash and can be encrypted with the
default KMS key. | `list(string)` | `null` | no | +| [stage](#input_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [timeout](#input_timeout) | The amount of time the Lambda Function has to run in seconds. | `number` | `3` | no | +| [tracing_config_mode](#input_tracing_config_mode) | Tracing config mode of the Lambda function. Can be either PassThrough or Active. | `string` | `null` | no | +| [vpc_config](#input_vpc_config) | Provide this to allow your function to access your VPC (if both 'subnet_ids' and 'security_group_ids' are empty then
vpc_config is considered to be empty or unset, see https://docs.aws.amazon.com/lambda/latest/dg/vpc.html for details). |
object({
security_group_ids = list(string)
subnet_ids = list(string)
})
| `null` | no | +| [zip](#input_zip) | Zip Configuration for local file deployments |
object({
enabled = optional(bool, false)
output = optional(string, "output.zip")
input_dir = optional(string, null)
})
|
{
"enabled": false,
"output": "output.zip"
}
| no | + +## Outputs + +| Name | Description | +| -------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------ | +| [arn](#output_arn) | ARN of the lambda function | +| [function_name](#output_function_name) | Lambda function name | +| [invoke_arn](#output_invoke_arn) | Invoke ARN of the lambda function | +| [qualified_arn](#output_qualified_arn) | ARN identifying your Lambda Function Version (if versioning is enabled via publish = true) | +| [role_arn](#output_role_arn) | Lambda IAM role ARN | +| [role_name](#output_role_name) | Lambda IAM role name | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/lambda) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/lambda/context.tf b/modules/lambda/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/lambda/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/lambda/lambdas/.gitignore b/modules/lambda/lambdas/.gitignore new file mode 100644 index 000000000..e76f4fc4d --- /dev/null +++ b/modules/lambda/lambdas/.gitignore @@ -0,0 +1,3 @@ +*/*.zip + +*/hello_world diff --git a/modules/lambda/lambdas/hello-world-python/lambda.py b/modules/lambda/lambdas/hello-world-python/lambda.py new file mode 100644 index 000000000..2990b3670 --- /dev/null +++ b/modules/lambda/lambdas/hello-world-python/lambda.py @@ -0,0 +1,5 @@ +def lambda_handler(event, context): + message = 'Hello {} {}!'.format(event['first_name'], event['last_name']) + return { + 'message' : message + } diff --git a/modules/lambda/main.tf b/modules/lambda/main.tf new file mode 100644 index 000000000..93ce28e94 --- /dev/null +++ b/modules/lambda/main.tf @@ -0,0 +1,119 @@ +locals { + enabled = module.this.enabled + var_iam_policy_enabled = local.enabled && (try(length(var.iam_policy), 0) > 0 || var.policy_json != null) + iam_policy_enabled = local.enabled && local.var_iam_policy_enabled + + s3_bucket_name = var.s3_bucket_name != null ? var.s3_bucket_name : one(module.s3_bucket[*].outputs.bucket_id) + + function_name = coalesce(var.function_name, module.this.id) + + var_policy_json = local.var_iam_policy_enabled ? [var.policy_json] : [] + + lambda_files = fileset("${path.module}/lambdas/${var.zip.input_dir == null ? "" : var.zip.input_dir}", "*") + file_content_map = var.zip.enabled ? [ + for f in local.lambda_files : filebase64sha256("${path.module}/lambdas/${coalesce(var.zip.input_dir, var.name)}/${f}") + ] : [] + output_zip_file = local.enabled && var.zip.enabled ? "${path.module}/lambdas/${random_pet.zip_recreator[0].id}.zip" : null + + cicd_s3_key_format = var.cicd_s3_key_format != null ? var.cicd_s3_key_format : "stage/${module.this.stage}/lambda/${local.function_name}/%s" + s3_key = var.s3_key != null ? 
var.s3_key : format(local.cicd_s3_key_format, coalesce(one(data.aws_ssm_parameter.cicd_ssm_param[*].value), "example")) +} + +data "aws_ssm_parameter" "cicd_ssm_param" { + count = local.enabled && var.cicd_ssm_param_name != null ? 1 : 0 + + name = var.cicd_ssm_param_name +} + +module "iam_policy" { + count = local.iam_policy_enabled ? 1 : 0 + source = "cloudposse/iam-policy/aws" + version = "1.0.1" + + iam_policy_enabled = true + iam_policy = var.iam_policy + iam_source_policy_documents = local.var_policy_json != null ? local.var_policy_json : [] + context = module.this.context +} + +resource "aws_iam_role_policy_attachment" "default" { + count = local.iam_policy_enabled ? 1 : 0 + + role = module.lambda.role_name + policy_arn = module.iam_policy[0].policy_arn +} + +data "archive_file" "lambdazip" { + count = local.enabled && var.zip.enabled ? 1 : 0 + type = "zip" + + output_path = local.output_zip_file + source_dir = "${path.module}/lambdas/${var.zip.input_dir}" + + depends_on = [random_pet.zip_recreator] +} + +resource "random_pet" "zip_recreator" { + count = local.enabled && var.zip.enabled ? 1 : 0 + + prefix = coalesce(module.this.name, "lambda") + keepers = { + file_content = join(",", local.file_content_map) + } +} + +module "lambda" { + source = "cloudposse/lambda-function/aws" + version = "0.6.1" + + function_name = local.function_name + description = var.description + handler = var.handler + lambda_environment = var.lambda_environment + image_uri = var.image_uri + image_config = var.image_config + + filename = var.zip.enabled ? coalesce(data.archive_file.lambdazip[0].output_path, var.filename) : var.filename + s3_bucket = local.s3_bucket_name + s3_key = local.s3_key + s3_object_version = var.s3_object_version + + architectures = var.architectures + cloudwatch_lambda_insights_enabled = var.cloudwatch_lambda_insights_enabled + cloudwatch_logs_retention_in_days = var.cloudwatch_logs_retention_in_days + cloudwatch_logs_kms_key_arn = var.cloudwatch_logs_kms_key_arn + kms_key_arn = var.kms_key_arn + lambda_at_edge_enabled = var.lambda_at_edge_enabled + layers = var.layers + memory_size = var.memory_size + package_type = var.package_type + permissions_boundary = var.permissions_boundary + publish = var.publish + reserved_concurrent_executions = var.reserved_concurrent_executions + runtime = var.runtime + source_code_hash = var.source_code_hash + ssm_parameter_names = var.ssm_parameter_names + timeout = var.timeout + tracing_config_mode = var.tracing_config_mode + vpc_config = var.vpc_config + custom_iam_policy_arns = var.custom_iam_policy_arns + dead_letter_config_target_arn = var.dead_letter_config_target_arn + iam_policy_description = var.iam_policy_description + + context = module.this.context +} + +resource "aws_lambda_function_url" "lambda_url" { + count = var.function_url_enabled ? 
1 : 0 + function_name = module.lambda.function_name + authorization_type = "AWS_IAM" + + cors { + allow_credentials = true + allow_origins = ["*"] + allow_methods = ["*"] + allow_headers = ["date", "keep-alive"] + expose_headers = ["keep-alive", "date"] + max_age = 86400 + } +} diff --git a/modules/lambda/outputs.tf b/modules/lambda/outputs.tf new file mode 100644 index 000000000..e62082216 --- /dev/null +++ b/modules/lambda/outputs.tf @@ -0,0 +1,29 @@ +output "arn" { + description = "ARN of the lambda function" + value = module.lambda.arn +} + +output "invoke_arn" { + description = "Invoke ARN of the lambda function" + value = module.lambda.invoke_arn +} + +output "qualified_arn" { + description = "ARN identifying your Lambda Function Version (if versioning is enabled via publish = true)" + value = module.lambda.qualified_arn +} + +output "function_name" { + description = "Lambda function name" + value = module.lambda.function_name +} + +output "role_name" { + description = "Lambda IAM role name" + value = module.lambda.role_name +} + +output "role_arn" { + description = "Lambda IAM role ARN" + value = module.lambda.role_arn +} diff --git a/modules/lambda/providers-archive.tf b/modules/lambda/providers-archive.tf new file mode 100644 index 000000000..f0dc6f82d --- /dev/null +++ b/modules/lambda/providers-archive.tf @@ -0,0 +1 @@ +provider "archive" {} diff --git a/modules/lambda/providers.tf b/modules/lambda/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/lambda/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/lambda/remote-state.tf b/modules/lambda/remote-state.tf new file mode 100644 index 000000000..af8168f35 --- /dev/null +++ b/modules/lambda/remote-state.tf @@ -0,0 +1,14 @@ +module "s3_bucket" { + count = local.enabled && var.s3_bucket_component != null ? 
1 : 0 + + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.s3_bucket_component.component + + tenant = var.s3_bucket_component.tenant + environment = var.s3_bucket_component.environment + stage = var.s3_bucket_component.stage + + context = module.this.context +} diff --git a/modules/lambda/triggers_cloudwatch_event_rules.tf b/modules/lambda/triggers_cloudwatch_event_rules.tf new file mode 100644 index 000000000..e97aba2b8 --- /dev/null +++ b/modules/lambda/triggers_cloudwatch_event_rules.tf @@ -0,0 +1,43 @@ +module "cloudwatch_event_rules_label" { + for_each = var.cloudwatch_event_rules + + source = "cloudposse/label/null" + version = "0.25.0" + attributes = [each.key] + + context = module.this.context +} + +resource "aws_cloudwatch_event_rule" "event_rules" { + for_each = var.cloudwatch_event_rules + + name = module.cloudwatch_event_rules_label[each.key].id + + description = each.value.description + event_bus_name = each.value.event_bus_name + event_pattern = each.value.event_pattern + is_enabled = each.value.is_enabled + name_prefix = each.value.name_prefix + role_arn = each.value.role_arn + schedule_expression = each.value.schedule_expression + + tags = module.cloudwatch_event_rules_label[each.key].tags +} + +resource "aws_cloudwatch_event_target" "sns" { + for_each = var.cloudwatch_event_rules + + rule = aws_cloudwatch_event_rule.event_rules[each.key].name + target_id = "ScheduleExpression" + arn = module.lambda.arn +} + +resource "aws_lambda_permission" "allow_cloudwatch_to_call_lambda" { + for_each = var.cloudwatch_event_rules + + statement_id = format("%s-%s", "AllowExecutionFromCloudWatch", each.key) + action = "lambda:InvokeFunction" + function_name = module.lambda.function_name + principal = "events.amazonaws.com" + source_arn = aws_cloudwatch_event_rule.event_rules[each.key].arn +} diff --git a/modules/lambda/triggers_s3_notifications.tf b/modules/lambda/triggers_s3_notifications.tf new file mode 100644 index 000000000..ee8af935c --- /dev/null +++ b/modules/lambda/triggers_s3_notifications.tf @@ -0,0 +1,58 @@ +variable "s3_notifications" { + type = map(object({ + bucket_name = optional(string) + bucket_component = optional(object({ + component = string + environment = optional(string) + tenant = optional(string) + stage = optional(string) + })) + events = optional(list(string)) + filter_prefix = optional(string) + filter_suffix = optional(string) + source_account = optional(string) + })) + description = "A map of S3 bucket notifications to trigger the Lambda function" + default = {} +} + +module "s3_bucket_notifications_component" { + for_each = { for k, v in var.s3_notifications : k => v if v.bucket_component != null } + + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = each.value.bucket_component.component + + tenant = each.value.bucket_component.tenant + environment = each.value.bucket_component.environment + stage = each.value.bucket_component.stage + + context = module.this.context +} + +resource "aws_lambda_permission" "s3_notification" { + for_each = var.s3_notifications + + statement_id = "AllowS3Invoke" + action = "lambda:InvokeFunction" + function_name = module.lambda.function_name + principal = "s3.amazonaws.com" + source_arn = format("arn:aws:s3:::%s", each.value.bucket_component == null ? 
each.value.bucket_name : module.s3_bucket_notifications_component[each.key].outputs.bucket_id) + source_account = each.value.source_account +} + +resource "aws_s3_bucket_notification" "s3_notifications" { + for_each = var.s3_notifications + + depends_on = [aws_lambda_permission.s3_notification] + + bucket = each.value.bucket_component == null ? each.value.bucket_name : module.s3_bucket_notifications_component[each.key].outputs.bucket_id + + lambda_function { + lambda_function_arn = module.lambda.arn + events = each.value.events == null ? ["s3:ObjectCreated:*"] : each.value.events + filter_prefix = each.value.filter_prefix + filter_suffix = each.value.filter_suffix + } +} diff --git a/modules/lambda/triggers_sqs_queue.tf b/modules/lambda/triggers_sqs_queue.tf new file mode 100644 index 000000000..747f87bdf --- /dev/null +++ b/modules/lambda/triggers_sqs_queue.tf @@ -0,0 +1,81 @@ +variable "sqs_notifications" { + type = map(object({ + sqs_arn = optional(string) + sqs_component = optional(object({ + component = string + environment = optional(string) + tenant = optional(string) + stage = optional(string) + })) + batch_size = optional(number) + source_account = optional(string) + on_failure_arn = optional(string) + maximum_concurrency = optional(number) + })) + description = "A map of SQS queue notifications to trigger the Lambda function" + default = {} +} + +module "sqs_queue" { + for_each = { for k, v in var.sqs_notifications : k => v if v.sqs_component != null } + + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = each.value.sqs_component.component + + tenant = each.value.sqs_component.tenant + environment = each.value.sqs_component.environment + stage = each.value.sqs_component.stage + + context = module.this.context +} + +module "sqs_iam_policy" { + for_each = var.sqs_notifications + + source = "cloudposse/iam-policy/aws" + version = "1.0.1" + + iam_policy_enabled = true + iam_policy = { + version = "2012-10-17" + statements = [ + { + effect = "Allow" + actions = ["sqs:ReceiveMessage", "sqs:DeleteMessage", "sqs:GetQueueAttributes"] + resources = each.value.sqs_arn != null ? [each.value.sqs_arn.sqs_arn] : [module.sqs_queue[each.key].outputs.sqs_queue.queue_arn] + }, + ] + } + context = module.this.context +} + +resource "aws_iam_role_policy_attachment" "sqs_default" { + for_each = var.sqs_notifications + + role = module.lambda.role_name + policy_arn = module.sqs_iam_policy[each.key].policy_arn +} + +resource "aws_lambda_event_source_mapping" "event_source_mapping" { + for_each = var.sqs_notifications + + event_source_arn = each.value.sqs_arn != null ? [each.value.sqs_arn.sqs_arn] : module.sqs_queue[each.key].outputs.sqs_queue.queue_arn + function_name = module.lambda.function_name + batch_size = each.value.batch_size == null ? 
1 : each.value.batch_size + + scaling_config { + maximum_concurrency = each.value.maximum_concurrency + } + dynamic "destination_config" { + for_each = { for k, v in each.value : k => v if k == "on_failure_arn" && v != null } + content { + on_failure { + destination_arn = destination_config.value + } + } + } + + depends_on = [aws_iam_role_policy_attachment.sqs_default] +} diff --git a/modules/lambda/variables.tf b/modules/lambda/variables.tf new file mode 100644 index 000000000..8785a4037 --- /dev/null +++ b/modules/lambda/variables.tf @@ -0,0 +1,324 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "function_name" { + type = string + description = "Unique name for the Lambda Function." + default = null +} + +variable "architectures" { + type = list(string) + description = < + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 5.0 | +| [awsutils](#requirement\_awsutils) | >= 0.17.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 5.0 | +| [awsutils](#provider\_awsutils) | >= 0.17.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_macie2_account.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/macie2_account) | resource | +| [aws_macie2_organization_admin_account.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/macie2_organization_admin_account) | resource | +| [awsutils_macie2_organization_settings.this](https://registry.terraform.io/providers/cloudposse/awsutils/latest/docs/resources/macie2_organization_settings) | resource | +| [aws_caller_identity.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [account\_map\_tenant](#input\_account\_map\_tenant) | The tenant where the `account_map` component required by remote-state is deployed | `string` | `"core"` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [admin\_delegated](#input\_admin\_delegated) | A flag to indicate if the AWS Organization-wide settings should be created. This can only be done after the Macie
Administrator account has already been delegated from the AWS Org Management account (usually 'root'). See the
Deployment section of the README for more information. | `bool` | `false` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delegated\_admininstrator\_component\_name](#input\_delegated\_admininstrator\_component\_name) | The name of the component that created the Macie account. | `string` | `"macie/delegated-administrator"` | no | +| [delegated\_administrator\_account\_name](#input\_delegated\_administrator\_account\_name) | The name of the account that is the AWS Organization Delegated Administrator account | `string` | `"core-security"` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [finding\_publishing\_frequency](#input\_finding\_publishing\_frequency) | Specifies how often to publish updates to policy findings for the account. This includes publishing updates to AWS
Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events). For more information, see:

https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings_cloudwatch.html#guardduty_findings_cloudwatch_notification_frequency | `string` | `"FIFTEEN_MINUTES"` | no | +| [global\_environment](#input\_global\_environment) | Global environment name | `string` | `"gbl"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [member\_accounts](#input\_member\_accounts) | List of member account names to enable Macie on | `list(string)` | `[]` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [organization\_management\_account\_name](#input\_organization\_management\_account\_name) | The name of the AWS Organization management account | `string` | `null` | no | +| [privileged](#input\_privileged) | true if the default provider already has access to the backend | `bool` | `false` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [root\_account\_stage](#input\_root\_account\_stage) | The stage name for the Organization root (management) account. This is used to lookup account IDs from account names
using the `account-map` component. | `string` | `"root"` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [delegated\_administrator\_account\_id](#output\_delegated\_administrator\_account\_id) | The AWS Account ID of the AWS Organization delegated administrator account | +| [macie\_account\_id](#output\_macie\_account\_id) | The ID of the Macie account created by the component | +| [macie\_service\_role\_arn](#output\_macie\_service\_role\_arn) | The Amazon Resource Name (ARN) of the service-linked role that allows Macie to monitor and analyze data in AWS resources for the account. | +| [member\_account\_ids](#output\_member\_account\_ids) | The AWS Account IDs of the member accounts | + + + +## References + +- [AWS GuardDuty Documentation](https://aws.amazon.com/guardduty/) +- [Cloud Posse's upstream component](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/guardduty/common/) + +[](https://cpco.io/component) diff --git a/modules/macie/context.tf b/modules/macie/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/macie/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/macie/main.tf b/modules/macie/main.tf new file mode 100644 index 000000000..594f08e3c --- /dev/null +++ b/modules/macie/main.tf @@ -0,0 +1,41 @@ +locals { + enabled = module.this.enabled + account_map = module.account_map.outputs.full_account_map + + current_account_id = one(data.aws_caller_identity.this[*].account_id) + member_account_id_list = [for a in keys(local.account_map) : (local.account_map[a]) if contains(var.member_accounts, a) && local.account_map[a] != local.org_delegated_administrator_account_id] + org_delegated_administrator_account_id = local.account_map[var.delegated_administrator_account_name] + org_management_account_id = var.organization_management_account_name == null ? local.account_map[module.account_map.outputs.root_account_account_name] : local.account_map[var.organization_management_account_name] + is_org_delegated_administrator_account = local.current_account_id == local.org_delegated_administrator_account_id + is_org_management_account = local.current_account_id == local.org_management_account_id + + is_root_account_member_account = local.is_org_management_account && contains(local.member_account_id_list, local.org_management_account_id) + create_macie_account = local.enabled && ((local.is_org_delegated_administrator_account && !var.admin_delegated) || local.is_root_account_member_account) + create_org_delegation = local.enabled && local.is_org_management_account + create_org_settings = local.enabled && local.is_org_delegated_administrator_account && var.admin_delegated +} + +data "aws_caller_identity" "this" { + count = local.enabled ? 1 : 0 +} + +# If we are are in the AWS Org management account, delegate Macie to the org administrator account +# (usually the security account) +resource "aws_macie2_organization_admin_account" "this" { + count = local.create_org_delegation ? 1 : 0 + admin_account_id = local.org_delegated_administrator_account_id +} + +resource "awsutils_macie2_organization_settings" "this" { + count = local.create_org_settings ? 
1 : 0 + member_accounts = local.member_account_id_list +} + +# If we are are in the AWS Org designated administrator account, enable macie detector and optionally create an +# SNS topic for notifications and CloudWatch event rules for findings +resource "aws_macie2_account" "this" { + count = local.create_macie_account ? 1 : 0 + + finding_publishing_frequency = var.finding_publishing_frequency + status = "ENABLED" +} diff --git a/modules/macie/outputs.tf b/modules/macie/outputs.tf new file mode 100644 index 000000000..47f350e15 --- /dev/null +++ b/modules/macie/outputs.tf @@ -0,0 +1,19 @@ +output "delegated_administrator_account_id" { + value = local.org_delegated_administrator_account_id + description = "The AWS Account ID of the AWS Organization delegated administrator account" +} + +output "member_account_ids" { + value = local.create_org_settings ? local.member_account_id_list : null + description = "The AWS Account IDs of the member accounts" +} + +output "macie_account_id" { + value = local.create_macie_account ? try(aws_macie2_account.this[0].id, null) : null + description = "The ID of the Macie account created by the component" +} + +output "macie_service_role_arn" { + value = local.create_macie_account ? try(aws_macie2_account.this[0].service_role, null) : null + description = "The Amazon Resource Name (ARN) of the service-linked role that allows Macie to monitor and analyze data in AWS resources for the account." +} diff --git a/modules/macie/providers.tf b/modules/macie/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/macie/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/macie/remote-state.tf b/modules/macie/remote-state.tf new file mode 100644 index 000000000..d9c31bca2 --- /dev/null +++ b/modules/macie/remote-state.tf @@ -0,0 +1,12 @@ +module "account_map" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "account-map" + tenant = var.account_map_tenant != "" ? 
var.account_map_tenant : module.this.tenant + stage = var.root_account_stage + environment = var.global_environment + privileged = var.privileged + + context = module.this.context +} diff --git a/modules/macie/variables.tf b/modules/macie/variables.tf new file mode 100644 index 000000000..22414b9f7 --- /dev/null +++ b/modules/macie/variables.tf @@ -0,0 +1,78 @@ +variable "account_map_tenant" { + type = string + default = "core" + description = "The tenant where the `account_map` component required by remote-state is deployed" +} + +variable "admin_delegated" { + type = bool + default = false + description = < + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [time](#requirement\_time) | >= 0.11.1 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | +| [time](#provider\_time) | >= 0.11.1 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [managed\_grafana](#module\_managed\_grafana) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [ssm\_parameters](#module\_ssm\_parameters) | cloudposse/ssm-parameter-store/aws | 0.13.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_grafana_workspace_api_key.key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/grafana_workspace_api_key) | resource | +| [time_rotating.ttl](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/rotating) | resource | +| [time_static.ttl](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/static) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [grafana\_component\_name](#input\_grafana\_component\_name) | The name of the Grafana component | `string` | `"managed-grafana/workspace"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [key\_role](#input\_key\_role) | Specifies the permission level of the API key. Valid values are VIEWER, EDITOR, or ADMIN. | `string` | `"ADMIN"` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [minutes\_to\_live](#input\_minutes\_to\_live) | Specifies the time in minutes until the API key expires. Keys can be valid for up to 30 days. | `number` | `43200` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [ssm\_path\_format\_api\_key](#input\_ssm\_path\_format\_api\_key) | The path in AWS SSM to the Grafana API Key provisioned with this component | `string` | `"/grafana/%s/api_key"` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [ssm\_path\_grafana\_api\_key](#output\_ssm\_path\_grafana\_api\_key) | The path in AWS SSM to the Grafana API Key provisioned with this component | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/managed-grafana/api-key) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/managed-grafana/api-key/context.tf b/modules/managed-grafana/api-key/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/managed-grafana/api-key/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. 
+ See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. 
+ EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/managed-grafana/api-key/main.tf b/modules/managed-grafana/api-key/main.tf new file mode 100644 index 000000000..8300a7981 --- /dev/null +++ b/modules/managed-grafana/api-key/main.tf @@ -0,0 +1,45 @@ +locals { + enabled = module.this.enabled + + ssm_path_api_key = format(var.ssm_path_format_api_key, module.this.id) +} + +resource "time_rotating" "ttl" { + rotation_minutes = var.minutes_to_live +} + +resource "time_static" "ttl" { + rfc3339 = time_rotating.ttl.rfc3339 +} + +resource "aws_grafana_workspace_api_key" "key" { + count = local.enabled ? 1 : 0 + + key_name = module.this.id + key_role = var.key_role + seconds_to_live = var.minutes_to_live * 60 + workspace_id = module.managed_grafana.outputs.workspace_id + + lifecycle { + replace_triggered_by = [ + time_static.ttl + ] + } +} + +module "ssm_parameters" { + source = "cloudposse/ssm-parameter-store/aws" + version = "0.13.0" + + parameter_write = [ + { + name = local.ssm_path_api_key + value = aws_grafana_workspace_api_key.key[0].key + type = "SecureString" + overwrite = "true" + description = "Grafana Workspace API Key" + } + ] + + context = module.this.context +} diff --git a/modules/managed-grafana/api-key/outputs.tf b/modules/managed-grafana/api-key/outputs.tf new file mode 100644 index 000000000..74e0f5375 --- /dev/null +++ b/modules/managed-grafana/api-key/outputs.tf @@ -0,0 +1,4 @@ +output "ssm_path_grafana_api_key" { + description = "The path in AWS SSM to the Grafana API Key provisioned with this component" + value = local.ssm_path_api_key +} diff --git a/modules/managed-grafana/api-key/providers.tf b/modules/managed-grafana/api-key/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/managed-grafana/api-key/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/managed-grafana/api-key/remote-state.tf b/modules/managed-grafana/api-key/remote-state.tf new file mode 100644 index 000000000..dfdfa67dd --- /dev/null +++ b/modules/managed-grafana/api-key/remote-state.tf @@ -0,0 +1,8 @@ +module "managed_grafana" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.grafana_component_name + + context = module.this.context +} diff --git a/modules/managed-grafana/api-key/variables.tf b/modules/managed-grafana/api-key/variables.tf new file mode 100644 index 000000000..4a5cba6c6 --- /dev/null +++ b/modules/managed-grafana/api-key/variables.tf @@ -0,0 +1,28 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "grafana_component_name" { + type = string + description = "The name of the Grafana component" + default = "managed-grafana/workspace" +} + +variable "ssm_path_format_api_key" { + type = string + description = "The path in AWS SSM to the Grafana API Key provisioned with this component" + default = "/grafana/%s/api_key" +} + +variable "key_role" { + type = string + description = "Specifies the permission level of the API key. Valid values are VIEWER, EDITOR, or ADMIN." + default = "ADMIN" +} + +variable "minutes_to_live" { + type = number + description = "Specifies the time in minutes until the API key expires. Keys can be valid for up to 30 days." + default = 43200 # 30 days +} diff --git a/modules/managed-grafana/api-key/versions.tf b/modules/managed-grafana/api-key/versions.tf new file mode 100644 index 000000000..00e0b097d --- /dev/null +++ b/modules/managed-grafana/api-key/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + time = { + source = "hashicorp/time" + version = ">= 0.11.1" + } + } +} diff --git a/modules/managed-grafana/dashboard/README.md b/modules/managed-grafana/dashboard/README.md new file mode 100644 index 000000000..170b5941a --- /dev/null +++ b/modules/managed-grafana/dashboard/README.md @@ -0,0 +1,113 @@ +--- +tags: + - component/managed-grafana/dashboard + - layer/grafana + - provider/aws + - provider/grafana +--- + +# Component: `managed-grafana/dashboard` + +This component is responsible for provisioning a dashboard an Amazon Managed Grafana workspace. + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. 
+ +```yaml +components: + terraform: + grafana/dashboard/prometheus: + metadata: + component: managed-grafana/dashboard + vars: + enabled: true + name: "prometheus-dashboard" + grafana_component_name: grafana + grafana_api_key_component_name: grafana/api-key + dashboard_url: "https://grafana.com/api/dashboards/315/revisions/3/download" + config_input: + "${DS_PROMETHEUS}": "acme-plat-ue2-sandbox-prometheus" # Input Value : Data source UID +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [grafana](#requirement\_grafana) | >= 2.18.0 | +| [http](#requirement\_http) | >= 3.4.2 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | +| [grafana](#provider\_grafana) | >= 2.18.0 | +| [http](#provider\_http) | >= 3.4.2 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [config\_json](#module\_config\_json) | cloudposse/config/yaml//modules/deepmerge | 1.0.2 | +| [grafana](#module\_grafana) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [grafana\_api\_key](#module\_grafana\_api\_key) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [grafana_dashboard.this](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/dashboard) | resource | +| [aws_ssm_parameter.grafana_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [http_http.grafana_dashboard_json](https://registry.terraform.io/providers/hashicorp/http/latest/docs/data-sources/http) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_config](#input\_additional\_config) | Additional dashboard configuration to be merged with the provided dashboard JSON | `map(any)` | `{}` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [config\_input](#input\_config\_input) | A map of string replacements used to supply input for the dashboard config JSON | `map(string)` | `{}` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [dashboard\_name](#input\_dashboard\_name) | The name to use for the dashboard. This must be unique. | `string` | n/a | yes | +| [dashboard\_url](#input\_dashboard\_url) | The marketplace URL of the dashboard to be created | `string` | n/a | yes | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to the `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [grafana\_api\_key\_component\_name](#input\_grafana\_api\_key\_component\_name) | The name of the component used to provision an Amazon Managed Grafana API key | `string` | `"managed-grafana/api-key"` | no | +| [grafana\_component\_name](#input\_grafana\_component\_name) | The name of the component used to provision an Amazon Managed Grafana workspace | `string` | `"managed-grafana/workspace"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +No outputs. + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/managed-grafana/dashboard) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/managed-grafana/dashboard/context.tf b/modules/managed-grafana/dashboard/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/managed-grafana/dashboard/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. 
+ Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. 
+ This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/managed-grafana/dashboard/main.tf b/modules/managed-grafana/dashboard/main.tf new file mode 100644 index 000000000..98e59054a --- /dev/null +++ b/modules/managed-grafana/dashboard/main.tf @@ -0,0 +1,35 @@ +locals { + enabled = module.this.enabled + + # Replace each of the keys in var.config_input with the given value in the module.config_json[0].merged result + config_json = join("", [for k in keys(var.config_input) : replace(jsonencode(module.config_json[0].merged), k, var.config_input[k])]) +} + +data "http" "grafana_dashboard_json" { + count = local.enabled ? 1 : 0 + + url = var.dashboard_url +} + +module "config_json" { + source = "cloudposse/config/yaml//modules/deepmerge" + version = "1.0.2" + + count = local.enabled ? 1 : 0 + + maps = [ + jsondecode(data.http.grafana_dashboard_json[0].response_body), + { + "title" : var.dashboard_name, + "uid" : var.dashboard_name, + "id" : var.dashboard_name + }, + var.additional_config + ] +} + +resource "grafana_dashboard" "this" { + count = local.enabled ? 1 : 0 + + config_json = local.config_json +} diff --git a/modules/managed-grafana/dashboard/provider-grafana.tf b/modules/managed-grafana/dashboard/provider-grafana.tf new file mode 100644 index 000000000..51d6e65ea --- /dev/null +++ b/modules/managed-grafana/dashboard/provider-grafana.tf @@ -0,0 +1,38 @@ +variable "grafana_component_name" { + type = string + description = "The name of the component used to provision an Amazon Managed Grafana workspace" + default = "managed-grafana/workspace" +} + +variable "grafana_api_key_component_name" { + type = string + description = "The name of the component used to provision an Amazon Managed Grafana API key" + default = "managed-grafana/api-key" +} + +module "grafana" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.grafana_component_name + + context = module.this.context +} + +module "grafana_api_key" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.grafana_api_key_component_name + + context = module.this.context +} + +data "aws_ssm_parameter" "grafana_api_key" { + name = module.grafana_api_key.outputs.ssm_path_grafana_api_key +} + +provider "grafana" { + url = format("https://%s/", module.grafana.outputs.workspace_endpoint) + auth = data.aws_ssm_parameter.grafana_api_key.value +} diff --git a/modules/managed-grafana/dashboard/providers.tf b/modules/managed-grafana/dashboard/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/managed-grafana/dashboard/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/managed-grafana/dashboard/variables.tf b/modules/managed-grafana/dashboard/variables.tf new file mode 100644 index 000000000..198c88941 --- /dev/null +++ b/modules/managed-grafana/dashboard/variables.tf @@ -0,0 +1,26 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "dashboard_name" { + type = string + description = "The name to use for the dashboard. This must be unique." +} + +variable "dashboard_url" { + type = string + description = "The marketplace URL of the dashboard to be created" +} + +variable "additional_config" { + type = map(any) + description = "Additional dashboard configuration to be merged with the provided dashboard JSON" + default = {} +} + +variable "config_input" { + type = map(string) + description = "A map of string replacements used to supply input for the dashboard config JSON" + default = {} +} diff --git a/modules/managed-grafana/dashboard/versions.tf b/modules/managed-grafana/dashboard/versions.tf new file mode 100644 index 000000000..e3912e5ae --- /dev/null +++ b/modules/managed-grafana/dashboard/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + grafana = { + source = "grafana/grafana" + version = ">= 2.18.0" + } + http = { + source = "hashicorp/http" + version = ">= 3.4.2" + } + } +} diff --git a/modules/managed-grafana/data-source/loki/README.md b/modules/managed-grafana/data-source/loki/README.md new file mode 100644 index 000000000..248b9239d --- /dev/null +++ b/modules/managed-grafana/data-source/loki/README.md @@ -0,0 +1,144 @@ +--- +tags: + - component/managed-grafana/data-source/loki + - layer/grafana + - provider/aws + - provider/grafana +--- + +# Component: `managed-grafana/data-source/loki` + +This component is responsible for provisioning a Loki data source for an Amazon Managed Grafana workspace. + +Use this component alongside the `eks/loki` component. + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. 
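If Loki is deployed in the same account and stack as this component, the `loki_stage_name`, `loki_environment_name`, and `loki_tenant_name` overrides can be omitted; the remote-state lookup in `remote-state.tf` then falls back to the current context. A minimal single-stack sketch under that assumption (instance names are illustrative):

```yaml
components:
  terraform:
    grafana/datasource/loki:
      metadata:
        component: managed-grafana/data-source/loki
      vars:
        enabled: true
        name: loki
        grafana_component_name: grafana
        grafana_api_key_component_name: grafana/api-key
```

The multi-account configuration below instead provisions one data source per platform stage: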
+ +```yaml +components: + terraform: + grafana/datasource/defaults: + metadata: + component: managed-grafana/data-source/managed-prometheus + type: abstract + vars: + enabled: true + grafana_component_name: grafana + grafana_api_key_component_name: grafana/api-key + + grafana/datasource/plat-sandbox-loki: + metadata: + component: managed-grafana/data-source/loki + inherits: + - grafana/datasource/defaults + vars: + name: plat-sandbox-loki + loki_tenant_name: plat + loki_stage_name: sandbox + + grafana/datasource/plat-dev-loki: + metadata: + component: managed-grafana/data-source/loki + inherits: + - grafana/datasource/defaults + vars: + name: plat-dev-loki + loki_tenant_name: plat + loki_stage_name: dev + + grafana/datasource/plat-prod-loki: + metadata: + component: managed-grafana/data-source/loki + inherits: + - grafana/datasource/defaults + vars: + name: plat-prod-loki + loki_tenant_name: plat + loki_stage_name: prod +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [grafana](#requirement\_grafana) | >= 2.18.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | +| [aws.source](#provider\_aws.source) | >= 4.0 | +| [grafana](#provider\_grafana) | >= 2.18.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [grafana](#module\_grafana) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [grafana\_api\_key](#module\_grafana\_api\_key) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../../account-map/modules/iam-roles | n/a | +| [loki](#module\_loki) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [source\_account\_role](#module\_source\_account\_role) | ../../../account-map/modules/iam-roles | n/a | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [grafana_data_source.loki](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/data_source) | resource | +| [aws_ssm_parameter.basic_auth_password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.grafana_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to the `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [grafana\_api\_key\_component\_name](#input\_grafana\_api\_key\_component\_name) | The name of the component used to provision an Amazon Managed Grafana API key | `string` | `"managed-grafana/api-key"` | no | +| [grafana\_component\_name](#input\_grafana\_component\_name) | The name of the component used to provision an Amazon Managed Grafana workspace | `string` | `"managed-grafana/workspace"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [loki\_component\_name](#input\_loki\_component\_name) | The name of the loki component | `string` | `"eks/loki"` | no | +| [loki\_environment\_name](#input\_loki\_environment\_name) | The environment where the loki component is deployed | `string` | `""` | no | +| [loki\_stage\_name](#input\_loki\_stage\_name) | The stage where the loki component is deployed | `string` | `""` | no | +| [loki\_tenant\_name](#input\_loki\_tenant\_name) | The tenant where the loki component is deployed | `string` | `""` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [uid](#output\_uid) | The UID of this dashboard | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/managed-grafana/data-source/loki) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/managed-grafana/data-source/loki/context.tf b/modules/managed-grafana/data-source/loki/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/managed-grafana/data-source/loki/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. 
+ Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. 
+ This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/managed-grafana/data-source/loki/main.tf b/modules/managed-grafana/data-source/loki/main.tf new file mode 100644 index 000000000..5b33013d0 --- /dev/null +++ b/modules/managed-grafana/data-source/loki/main.tf @@ -0,0 +1,36 @@ +locals { + enabled = module.this.enabled + + # Assume basic auth is enabled if the loki component has a basic auth username output + basic_auth_enabled = local.enabled && length(module.loki.outputs.basic_auth_username) > 0 +} + +data "aws_ssm_parameter" "basic_auth_password" { + provider = aws.source + + count = local.basic_auth_enabled ? 1 : 0 + + name = module.loki.outputs.ssm_path_basic_auth_password +} + +resource "grafana_data_source" "loki" { + count = local.enabled ? 1 : 0 + + type = "loki" + name = module.loki.outputs.id + uid = module.loki.outputs.id + url = format("https://%s", module.loki.outputs.url) + + basic_auth_enabled = local.basic_auth_enabled + basic_auth_username = local.basic_auth_enabled ? module.loki.outputs.basic_auth_username : "" + secure_json_data_encoded = jsonencode(local.basic_auth_enabled ? { + basicAuthPassword = data.aws_ssm_parameter.basic_auth_password[0].value + } : {}) + + http_headers = { + # https://grafana.com/docs/loki/latest/operations/authentication/ + # > When using Loki in multi-tenant mode, Loki requires the HTTP header + # > X-Scope-OrgID to be set to a string identifying the tenant + "X-Scope-OrgID" = "1" + } +} diff --git a/modules/managed-grafana/data-source/loki/outputs.tf b/modules/managed-grafana/data-source/loki/outputs.tf new file mode 100644 index 000000000..c2ec8d52c --- /dev/null +++ b/modules/managed-grafana/data-source/loki/outputs.tf @@ -0,0 +1,5 @@ +output "uid" { + # The output id is not the uid. 
It follows a format like "1:uid" + value = split(":", grafana_data_source.loki[0].id)[1] + description = "The UID of this dashboard" +} diff --git a/modules/managed-grafana/data-source/loki/provider-grafana.tf b/modules/managed-grafana/data-source/loki/provider-grafana.tf new file mode 100644 index 000000000..51d6e65ea --- /dev/null +++ b/modules/managed-grafana/data-source/loki/provider-grafana.tf @@ -0,0 +1,38 @@ +variable "grafana_component_name" { + type = string + description = "The name of the component used to provision an Amazon Managed Grafana workspace" + default = "managed-grafana/workspace" +} + +variable "grafana_api_key_component_name" { + type = string + description = "The name of the component used to provision an Amazon Managed Grafana API key" + default = "managed-grafana/api-key" +} + +module "grafana" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.grafana_component_name + + context = module.this.context +} + +module "grafana_api_key" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.grafana_api_key_component_name + + context = module.this.context +} + +data "aws_ssm_parameter" "grafana_api_key" { + name = module.grafana_api_key.outputs.ssm_path_grafana_api_key +} + +provider "grafana" { + url = format("https://%s/", module.grafana.outputs.workspace_endpoint) + auth = data.aws_ssm_parameter.grafana_api_key.value +} diff --git a/modules/managed-grafana/data-source/loki/provider-source.tf b/modules/managed-grafana/data-source/loki/provider-source.tf new file mode 100644 index 000000000..83876cb9b --- /dev/null +++ b/modules/managed-grafana/data-source/loki/provider-source.tf @@ -0,0 +1,22 @@ +module "source_account_role" { + source = "../../../account-map/modules/iam-roles" + + stage = var.loki_stage_name + tenant = var.loki_tenant_name + + context = module.this.context +} + +provider "aws" { + alias = "source" + region = var.region + + profile = module.source_account_role.terraform_profile_name + + dynamic "assume_role" { + for_each = compact([module.source_account_role.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} diff --git a/modules/managed-grafana/data-source/loki/providers.tf b/modules/managed-grafana/data-source/loki/providers.tf new file mode 100644 index 000000000..59ec32354 --- /dev/null +++ b/modules/managed-grafana/data-source/loki/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/managed-grafana/data-source/loki/remote-state.tf b/modules/managed-grafana/data-source/loki/remote-state.tf new file mode 100644 index 000000000..87cf04b5e --- /dev/null +++ b/modules/managed-grafana/data-source/loki/remote-state.tf @@ -0,0 +1,36 @@ +variable "loki_component_name" { + type = string + description = "The name of the loki component" + default = "eks/loki" +} + +variable "loki_stage_name" { + type = string + description = "The stage where the loki component is deployed" + default = "" +} + +variable "loki_environment_name" { + type = string + description = "The environment where the loki component is deployed" + default = "" +} + +variable "loki_tenant_name" { + type = string + description = "The tenant where the loki component is deployed" + default = "" +} + +module "loki" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.loki_component_name + + stage = length(var.loki_stage_name) > 0 ? var.loki_stage_name : module.this.stage + environment = length(var.loki_environment_name) > 0 ? var.loki_environment_name : module.this.environment + tenant = length(var.loki_tenant_name) > 0 ? var.loki_tenant_name : module.this.tenant + + context = module.this.context +} diff --git a/modules/managed-grafana/data-source/loki/variables.tf b/modules/managed-grafana/data-source/loki/variables.tf new file mode 100644 index 000000000..0753180bf --- /dev/null +++ b/modules/managed-grafana/data-source/loki/variables.tf @@ -0,0 +1,4 @@ +variable "region" { + type = string + description = "AWS Region" +} diff --git a/modules/managed-grafana/data-source/loki/versions.tf b/modules/managed-grafana/data-source/loki/versions.tf new file mode 100644 index 000000000..0965af1f8 --- /dev/null +++ b/modules/managed-grafana/data-source/loki/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + grafana = { + source = "grafana/grafana" + version = ">= 2.18.0" + } + } +} diff --git a/modules/managed-grafana/data-source/managed-prometheus/README.md b/modules/managed-grafana/data-source/managed-prometheus/README.md new file mode 100644 index 000000000..2f3ae9bd9 --- /dev/null +++ b/modules/managed-grafana/data-source/managed-prometheus/README.md @@ -0,0 +1,143 @@ +--- +tags: + - component/managed-grafana/data-source/managed-prometheus + - layer/grafana + - provider/aws + - provider/grafana +--- + +# Component: `managed-grafana/data-source/managed-prometheus` + +This component is responsible for provisioning an Amazon Managed Prometheus data source for an Amazon Managed Grafana +workspace. + +Use this component alongside the `managed-prometheus/workspace` component. + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. 
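The `uid` exported by each instance is what the `managed-grafana/dashboard` component expects in its `config_input` map, which is how a dashboard's `${DS_PROMETHEUS}` template input gets resolved. A fragment tying the two together (the instance name and UID value are illustrative, and other required dashboard inputs are omitted for brevity):

```yaml
components:
  terraform:
    grafana/dashboard/prometheus:
      metadata:
        component: managed-grafana/dashboard
      vars:
        config_input:
          "${DS_PROMETHEUS}": "acme-plat-ue2-sandbox-prometheus" # Input Value : Data source UID
```

The snippet below provisions one data source per platform stage: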
+ +```yaml +components: + terraform: + grafana/datasource/defaults: + metadata: + component: managed-grafana/data-source/managed-prometheus + type: abstract + vars: + enabled: true + grafana_component_name: grafana + grafana_api_key_component_name: grafana/api-key + prometheus_component_name: prometheus + + grafana/datasource/plat-sandbox-prometheus: + metadata: + component: managed-grafana/data-source/managed-prometheus + inherits: + - grafana/datasource/defaults + vars: + name: plat-sandbox-prometheus + prometheus_tenant_name: plat + prometheus_stage_name: sandbox + + grafana/datasource/plat-dev-prometheus: + metadata: + component: managed-grafana/data-source/managed-prometheus + inherits: + - grafana/datasource/defaults + vars: + name: plat-dev-prometheus + prometheus_tenant_name: plat + prometheus_stage_name: dev + + grafana/datasource/plat-prod-prometheus: + metadata: + component: managed-grafana/data-source/managed-prometheus + inherits: + - grafana/datasource/defaults + vars: + name: plat-prod-prometheus + prometheus_tenant_name: plat + prometheus_stage_name: prod +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [grafana](#requirement\_grafana) | >= 2.18.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | +| [grafana](#provider\_grafana) | >= 2.18.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [grafana](#module\_grafana) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [grafana\_api\_key](#module\_grafana\_api\_key) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../../account-map/modules/iam-roles | n/a | +| [prometheus](#module\_prometheus) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [grafana_data_source.managed_prometheus](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/data_source) | resource | +| [aws_ssm_parameter.grafana_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to the `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [grafana\_api\_key\_component\_name](#input\_grafana\_api\_key\_component\_name) | The name of the component used to provision an Amazon Managed Grafana API key | `string` | `"managed-grafana/api-key"` | no | +| [grafana\_component\_name](#input\_grafana\_component\_name) | The name of the component used to provision an Amazon Managed Grafana workspace | `string` | `"managed-grafana/workspace"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [prometheus\_component\_name](#input\_prometheus\_component\_name) | The name of the Amazon Managed Prometheus component to be added as a Grafana data source | `string` | `"managed-prometheus/workspace"` | no | +| [prometheus\_environment\_name](#input\_prometheus\_environment\_name) | The environment where the Amazon Managed Prometheus component is deployed | `string` | `""` | no | +| [prometheus\_stage\_name](#input\_prometheus\_stage\_name) | The stage where the Amazon Managed Prometheus component is deployed | `string` | `""` | no | +| [prometheus\_tenant\_name](#input\_prometheus\_tenant\_name) | The tenant where the Amazon Managed Prometheus component is deployed | `string` | `""` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [uid](#output\_uid) | The UID of this dashboard | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/managed-grafana/data-source/managed-prometheus) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/managed-grafana/data-source/managed-prometheus/context.tf b/modules/managed-grafana/data-source/managed-prometheus/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/managed-grafana/data-source/managed-prometheus/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. 
+ See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. 
+ EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/managed-grafana/data-source/managed-prometheus/main.tf b/modules/managed-grafana/data-source/managed-prometheus/main.tf new file mode 100644 index 000000000..98a0d7c70 --- /dev/null +++ b/modules/managed-grafana/data-source/managed-prometheus/main.tf @@ -0,0 +1,20 @@ +locals { + enabled = module.this.enabled +} + +resource "grafana_data_source" "managed_prometheus" { + count = local.enabled ? 1 : 0 + + type = "prometheus" + name = module.prometheus.outputs.id + uid = module.prometheus.outputs.id + url = module.prometheus.outputs.workspace_endpoint + + json_data_encoded = jsonencode({ + sigV4Auth = true + httpMethod = "POST" + sigV4AuthType = "ec2_iam_role" + sigV4AssumeRoleArn = module.prometheus.outputs.access_role_arn + sigV4Region = module.prometheus.outputs.workspace_region + }) +} diff --git a/modules/managed-grafana/data-source/managed-prometheus/outputs.tf b/modules/managed-grafana/data-source/managed-prometheus/outputs.tf new file mode 100644 index 000000000..c4013c809 --- /dev/null +++ b/modules/managed-grafana/data-source/managed-prometheus/outputs.tf @@ -0,0 +1,5 @@ +output "uid" { + # The output "id" includes orgId (orgId:uid). We only want uid + value = split(":", grafana_data_source.managed_prometheus[0].id)[1] + description = "The UID of this dashboard" +} diff --git a/modules/managed-grafana/data-source/managed-prometheus/provider-grafana.tf b/modules/managed-grafana/data-source/managed-prometheus/provider-grafana.tf new file mode 100644 index 000000000..51d6e65ea --- /dev/null +++ b/modules/managed-grafana/data-source/managed-prometheus/provider-grafana.tf @@ -0,0 +1,38 @@ +variable "grafana_component_name" { + type = string + description = "The name of the component used to provision an Amazon Managed Grafana workspace" + default = "managed-grafana/workspace" +} + +variable "grafana_api_key_component_name" { + type = string + description = "The name of the component used to provision an Amazon Managed Grafana API key" + default = "managed-grafana/api-key" +} + +module "grafana" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.grafana_component_name + + context = module.this.context +} + +module "grafana_api_key" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.grafana_api_key_component_name + + context = module.this.context +} + +data "aws_ssm_parameter" "grafana_api_key" { + name = module.grafana_api_key.outputs.ssm_path_grafana_api_key +} + +provider "grafana" { + url = format("https://%s/", module.grafana.outputs.workspace_endpoint) + auth = data.aws_ssm_parameter.grafana_api_key.value +} diff --git a/modules/managed-grafana/data-source/managed-prometheus/providers.tf b/modules/managed-grafana/data-source/managed-prometheus/providers.tf new file mode 100644 index 000000000..59ec32354 --- /dev/null +++ b/modules/managed-grafana/data-source/managed-prometheus/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/managed-grafana/data-source/managed-prometheus/remote-state.tf b/modules/managed-grafana/data-source/managed-prometheus/remote-state.tf new file mode 100644 index 000000000..0494020a4 --- /dev/null +++ b/modules/managed-grafana/data-source/managed-prometheus/remote-state.tf @@ -0,0 +1,12 @@ +module "prometheus" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.prometheus_component_name + + stage = length(var.prometheus_stage_name) > 0 ? var.prometheus_stage_name : module.this.stage + environment = length(var.prometheus_environment_name) > 0 ? var.prometheus_environment_name : module.this.environment + tenant = length(var.prometheus_tenant_name) > 0 ? var.prometheus_tenant_name : module.this.tenant + + context = module.this.context +} diff --git a/modules/managed-grafana/data-source/managed-prometheus/variables.tf b/modules/managed-grafana/data-source/managed-prometheus/variables.tf new file mode 100644 index 000000000..635194b47 --- /dev/null +++ b/modules/managed-grafana/data-source/managed-prometheus/variables.tf @@ -0,0 +1,28 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "prometheus_component_name" { + type = string + description = "The name of the Amazon Managed Prometheus component to be added as a Grafana data source" + default = "managed-prometheus/workspace" +} + +variable "prometheus_stage_name" { + type = string + description = "The stage where the Amazon Managed Prometheus component is deployed" + default = "" +} + +variable "prometheus_environment_name" { + type = string + description = "The environment where the Amazon Managed Prometheus component is deployed" + default = "" +} + +variable "prometheus_tenant_name" { + type = string + description = "The tenant where the Amazon Managed Prometheus component is deployed" + default = "" +} diff --git a/modules/managed-grafana/data-source/managed-prometheus/versions.tf b/modules/managed-grafana/data-source/managed-prometheus/versions.tf new file mode 100644 index 000000000..0965af1f8 --- /dev/null +++ b/modules/managed-grafana/data-source/managed-prometheus/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + grafana = { + source = "grafana/grafana" + version = ">= 2.18.0" + } + } +} diff --git a/modules/managed-grafana/workspace/README.md b/modules/managed-grafana/workspace/README.md new file mode 100644 index 000000000..3c2a31de1 --- /dev/null +++ b/modules/managed-grafana/workspace/README.md @@ -0,0 +1,129 @@ +--- +tags: + - component/managed-grafana/workspace + - layer/grafana + - provider/aws + - provider/grafana +--- + +# Component: `managed-grafana/workspace` + +This component is responsible for provisioning an Amazon Managed Grafana workspace. + +Amazon Managed Grafana is a fully managed service for Grafana, a popular open-source analytics platform that enables you +to query, visualize, and alert on your metrics, logs, and traces. + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. 
+ +```yaml +components: + terraform: + grafana: + metadata: + component: managed-grafana/workspace + vars: + enabled: true + name: grafana + private_network_access_enabled: true + sso_role_associations: + - role: "ADMIN" + group_ids: + - "11111111-2222-3333-4444-555555555555" + # This grafana workspace will be allowed to assume the cross + # account access role from these prometheus components + prometheus_source_accounts: + - component: prometheus + tenant: plat + stage: sandbox + - component: prometheus + tenant: plat + stage: dev +``` + +> [!NOTE] +> +> We would prefer to have a custom URL for the provisioned Grafana workspace, but at the moment it's not supported +> natively and implementation would be non-trivial. We will continue to monitor that Issue and consider alternatives, +> such as using Cloudfront. +> +> [Issue #6: Support for Custom Domains](https://github.com/aws/amazon-managed-grafana-roadmap/issues/6) + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [managed\_grafana](#module\_managed\_grafana) | cloudposse/managed-grafana/aws | 0.1.0 | +| [prometheus](#module\_prometheus) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [security\_group](#module\_security\_group) | cloudposse/security-group/aws | 2.2.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_grafana_role_association.sso](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/grafana_role_association) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [private\_network\_access\_enabled](#input\_private\_network\_access\_enabled) | If set to `true`, enable the VPC Configuration to allow this workspace to access the private network using outputs from the vpc component | `bool` | `false` | no | +| [prometheus\_policy\_enabled](#input\_prometheus\_policy\_enabled) | Set this to `true` to allow this Grafana workspace to access Amazon Managed Prometheus in this account | `bool` | `false` | no | +| [prometheus\_source\_accounts](#input\_prometheus\_source\_accounts) | A list of objects that describe an account where Amazon Managed Prometheus is deployed. This component grants this Grafana IAM role permission to assume the Prometheus access role in that target account. Use this for cross-account access |
list(object({
component = optional(string, "managed-prometheus/workspace")
stage = string
tenant = optional(string, "")
environment = optional(string, "")
}))
| `[]` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [sso\_role\_associations](#input\_sso\_role\_associations) | A list of role to group ID list associations for granting Amazon Grafana access |
list(object({
role = string
group_ids = list(string)
}))
| `[]` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [workspace\_endpoint](#output\_workspace\_endpoint) | The returned URL of the Amazon Managed Grafana workspace | +| [workspace\_id](#output\_workspace\_id) | The ID of the Amazon Managed Grafana workspace | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/managed-grafana/workspace) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/managed-grafana/workspace/context.tf b/modules/managed-grafana/workspace/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/managed-grafana/workspace/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/managed-grafana/workspace/main.tf b/modules/managed-grafana/workspace/main.tf new file mode 100644 index 000000000..dc6ab5e8c --- /dev/null +++ b/modules/managed-grafana/workspace/main.tf @@ -0,0 +1,46 @@ +locals { + enabled = module.this.enabled + + additional_allowed_roles = compact([for prometheus in module.prometheus : prometheus.outputs.access_role_arn]) +} + +module "security_group" { + source = "cloudposse/security-group/aws" + version = "2.2.0" + + enabled = local.enabled && var.private_network_access_enabled + + allow_all_egress = true + rules = [] + vpc_id = module.vpc.outputs.vpc_id + + context = module.this.context +} + +module "managed_grafana" { + source = "cloudposse/managed-grafana/aws" + version = "0.1.0" + + enabled = local.enabled + + prometheus_policy_enabled = var.prometheus_policy_enabled + additional_allowed_roles = local.additional_allowed_roles + + vpc_configuration = var.private_network_access_enabled ? { + subnet_ids = module.vpc.outputs.private_subnet_ids + security_group_ids = [module.security_group.id] + } : {} + + context = module.this.context +} + +resource "aws_grafana_role_association" "sso" { + for_each = local.enabled ? 
{ + for association in var.sso_role_associations : association.role => association + } : {} + + role = each.value.role + group_ids = each.value.group_ids + + workspace_id = module.managed_grafana.workspace_id +} diff --git a/modules/managed-grafana/workspace/outputs.tf b/modules/managed-grafana/workspace/outputs.tf new file mode 100644 index 000000000..4b45b5041 --- /dev/null +++ b/modules/managed-grafana/workspace/outputs.tf @@ -0,0 +1,9 @@ +output "workspace_id" { + description = "The ID of the Amazon Managed Grafana workspace" + value = module.managed_grafana.workspace_id +} + +output "workspace_endpoint" { + description = "The returned URL of the Amazon Managed Grafana workspace" + value = module.managed_grafana.workspace_endpoint +} diff --git a/modules/managed-grafana/workspace/providers.tf b/modules/managed-grafana/workspace/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/managed-grafana/workspace/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/managed-grafana/workspace/remote-state.tf b/modules/managed-grafana/workspace/remote-state.tf new file mode 100644 index 000000000..6d7e102f8 --- /dev/null +++ b/modules/managed-grafana/workspace/remote-state.tf @@ -0,0 +1,24 @@ +module "prometheus" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + for_each = local.enabled ? { + for target in var.prometheus_source_accounts : "${target.tenant}:${target.stage}:${target.environment}" => target + } : {} + + component = each.value.component + stage = each.value.stage + environment = length(each.value.environment) > 0 ? each.value.environment : module.this.environment + tenant = length(each.value.tenant) > 0 ? 
each.value.tenant : module.this.tenant + + context = module.this.context +} + +module "vpc" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "vpc" + + context = module.this.context +} diff --git a/modules/managed-grafana/workspace/variables.tf b/modules/managed-grafana/workspace/variables.tf new file mode 100644 index 000000000..6bb68e35d --- /dev/null +++ b/modules/managed-grafana/workspace/variables.tf @@ -0,0 +1,37 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "sso_role_associations" { + type = list(object({ + role = string + group_ids = list(string) + })) + description = "A list of role to group ID list associations for granting Amazon Grafana access" + default = [] +} + +variable "prometheus_policy_enabled" { + type = bool + description = "Set this to `true` to allow this Grafana workspace to access Amazon Managed Prometheus in this account" + default = false +} + +variable "prometheus_source_accounts" { + type = list(object({ + component = optional(string, "managed-prometheus/workspace") + stage = string + tenant = optional(string, "") + environment = optional(string, "") + })) + description = "A list of objects that describe an account where Amazon Managed Prometheus is deployed. This component grants this Grafana IAM role permission to assume the Prometheus access role in that target account. Use this for cross-account access" + default = [] +} + + +variable "private_network_access_enabled" { + type = bool + description = "If set to `true`, enable the VPC Configuration to allow this workspace to access the private network using outputs from the vpc component" + default = false +} diff --git a/modules/managed-grafana/workspace/versions.tf b/modules/managed-grafana/workspace/versions.tf new file mode 100644 index 000000000..f33ede77f --- /dev/null +++ b/modules/managed-grafana/workspace/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + } +} diff --git a/modules/managed-prometheus/workspace/README.md b/modules/managed-prometheus/workspace/README.md new file mode 100644 index 000000000..f4ca4ac12 --- /dev/null +++ b/modules/managed-prometheus/workspace/README.md @@ -0,0 +1,112 @@ +--- +tags: + - component/managed-prometheus/workspace + - layer/grafana + - provider/aws +--- + +# Component: `managed-prometheus/workspace` + +This component is responsible for provisioning a workspace for Amazon Managed Service for Prometheus, also known as +Amazon Managed Prometheus (AMP). + +This component is intended to be deployed alongside Grafana. For example, use our `managed-grafana/workspace` component. + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. + +We prefer to name the stack component with a simpler name, whereas the Terraform component should remain descriptive. + +```yaml +components: + terraform: + prometheus: + metadata: + component: managed-prometheus/workspace + vars: + enabled: true + name: prometheus + # Create cross-account role for core-auto to access AMP + grafana_account_name: core-auto +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.0 | + +## Providers + +No providers. 
+ +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [managed\_prometheus](#module\_managed\_prometheus) | cloudposse/managed-prometheus/aws | 0.1.1 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [account\_map\_environment\_name](#input\_account\_map\_environment\_name) | The name of the environment where `account_map` is provisioned | `string` | `"gbl"` | no | +| [account\_map\_stage\_name](#input\_account\_map\_stage\_name) | The name of the stage where `account_map` is provisioned | `string` | `"root"` | no | +| [account\_map\_tenant\_name](#input\_account\_map\_tenant\_name) | The name of the tenant where `account_map` is provisioned | `string` | `"core"` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [alert\_manager\_definition](#input\_alert\_manager\_definition) | The alert manager definition that you want to be applied. | `string` | `""` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [grafana\_account\_name](#input\_grafana\_account\_name) | The name of the account allowed to access AMP in this account. If defined, this module will create a cross-account IAM role for accessing AMP. Use this for cross-account Grafana. If not defined, no roles will be created. | `string` | `""` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [rule\_group\_namespaces](#input\_rule\_group\_namespaces) | A list of name, data objects for each Amazon Managed Service for Prometheus (AMP) Rule Group Namespace |
list(object({
name = string
data = string
}))
| `[]` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [vpc\_endpoint\_enabled](#input\_vpc\_endpoint\_enabled) | If set to `true`, restrict traffic through a VPC endpoint | `string` | `true` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [access\_role\_arn](#output\_access\_role\_arn) | If enabled with `var.allowed_account_id`, the Role ARN used for accessing Amazon Managed Prometheus in this account | +| [id](#output\_id) | The ID of this component deployment | +| [workspace\_arn](#output\_workspace\_arn) | The ARN of this Amazon Managed Prometheus workspace | +| [workspace\_endpoint](#output\_workspace\_endpoint) | The endpoint URL of this Amazon Managed Prometheus workspace | +| [workspace\_id](#output\_workspace\_id) | The ID of this Amazon Managed Prometheus workspace | +| [workspace\_region](#output\_workspace\_region) | The region where this workspace is deployed | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/managed-prometheus/workspace) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/managed-prometheus/workspace/context.tf b/modules/managed-prometheus/workspace/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/managed-prometheus/workspace/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/managed-prometheus/workspace/main.tf b/modules/managed-prometheus/workspace/main.tf new file mode 100644 index 000000000..a8fad18ec --- /dev/null +++ b/modules/managed-prometheus/workspace/main.tf @@ -0,0 +1,23 @@ +locals { + enabled = module.this.enabled + + grafana_account_id = local.enabled && length(var.grafana_account_name) > 0 ? module.account_map.outputs.full_account_map[var.grafana_account_name] : "" + + vpc_endpoint_enabled = module.this.enabled && var.vpc_endpoint_enabled +} + +module "managed_prometheus" { + source = "cloudposse/managed-prometheus/aws" + version = "0.1.1" + + enabled = local.enabled + + alert_manager_definition = var.alert_manager_definition + allowed_account_id = local.grafana_account_id + rule_group_namespaces = var.rule_group_namespaces + scraper_deployed = true + + vpc_id = local.vpc_endpoint_enabled ? 
module.vpc[0].outputs.vpc_id : "" + + context = module.this.context +} diff --git a/modules/managed-prometheus/workspace/outputs.tf b/modules/managed-prometheus/workspace/outputs.tf new file mode 100644 index 000000000..750f37934 --- /dev/null +++ b/modules/managed-prometheus/workspace/outputs.tf @@ -0,0 +1,29 @@ +output "id" { + description = "The ID of this component deployment" + value = module.this.id +} + +output "workspace_id" { + description = "The ID of this Amazon Managed Prometheus workspace" + value = module.managed_prometheus.workspace_id +} + +output "workspace_arn" { + description = "The ARN of this Amazon Managed Prometheus workspace" + value = module.managed_prometheus.workspace_arn +} + +output "workspace_endpoint" { + description = "The endpoint URL of this Amazon Managed Prometheus workspace" + value = module.managed_prometheus.workspace_endpoint +} + +output "workspace_region" { + description = "The region where this workspace is deployed" + value = var.region +} + +output "access_role_arn" { + description = "If enabled with `var.allowed_account_id`, the Role ARN used for accessing Amazon Managed Prometheus in this account" + value = module.managed_prometheus.access_role_arn +} diff --git a/modules/managed-prometheus/workspace/providers.tf b/modules/managed-prometheus/workspace/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/managed-prometheus/workspace/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/managed-prometheus/workspace/remote-state.tf b/modules/managed-prometheus/workspace/remote-state.tf new file mode 100644 index 000000000..9516f64f2 --- /dev/null +++ b/modules/managed-prometheus/workspace/remote-state.tf @@ -0,0 +1,22 @@ +module "account_map" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "account-map" + tenant = var.account_map_tenant_name + environment = var.account_map_environment_name + stage = var.account_map_stage_name + + context = module.this.context +} + +module "vpc" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + count = local.vpc_endpoint_enabled ? 1 : 0 + + component = "vpc" + + context = module.this.context +} diff --git a/modules/managed-prometheus/workspace/variables.tf b/modules/managed-prometheus/workspace/variables.tf new file mode 100644 index 000000000..f6f4f850d --- /dev/null +++ b/modules/managed-prometheus/workspace/variables.tf @@ -0,0 +1,49 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "alert_manager_definition" { + type = string + description = "The alert manager definition that you want to be applied." 
+ default = "" +} + +variable "rule_group_namespaces" { + type = list(object({ + name = string + data = string + })) + description = "A list of name, data objects for each Amazon Managed Service for Prometheus (AMP) Rule Group Namespace" + default = [] +} + +variable "grafana_account_name" { + type = string + description = "The name of the account allowed to access AMP in this account. If defined, this module will create a cross-account IAM role for accessing AMP. Use this for cross-account Grafana. If not defined, no roles will be created." + default = "" +} + +variable "account_map_tenant_name" { + type = string + description = "The name of the tenant where `account_map` is provisioned" + default = "core" +} + +variable "account_map_environment_name" { + type = string + description = "The name of the environment where `account_map` is provisioned" + default = "gbl" +} + +variable "account_map_stage_name" { + type = string + description = "The name of the stage where `account_map` is provisioned" + default = "root" +} + +variable "vpc_endpoint_enabled" { + type = string + description = "If set to `true`, restrict traffic through a VPC endpoint" + default = true +} diff --git a/modules/managed-prometheus/workspace/versions.tf b/modules/managed-prometheus/workspace/versions.tf new file mode 100644 index 000000000..f33ede77f --- /dev/null +++ b/modules/managed-prometheus/workspace/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + } +} diff --git a/modules/memorydb/README.md b/modules/memorydb/README.md new file mode 100644 index 000000000..b0bdbd661 --- /dev/null +++ b/modules/memorydb/README.md @@ -0,0 +1,128 @@ +# Component: `memorydb` + +This component provisions an AWS MemoryDB cluster. MemoryDB is a fully managed, Redis-compatible, in-memory database +service. + +While Redis is commonly used as a cache, MemoryDB is designed to also function well as a +[vector database](https://docs.aws.amazon.com/memorydb/latest/devguide/vector-search.html). This makes it appropriate +for AI model backends. + +## Usage + +**Stack Level**: Regional + +### Example + +Here's an example snippet for how to use this component: + +```yaml +components: + terraform: + vpc: + vars: + availability_zones: + - "a" + - "b" + - "c" + ipv4_primary_cidr_block: "10.111.0.0/18" + memorydb: + vars: {} +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 5.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [memorydb](#module\_memorydb) | cloudposse/memorydb/aws | 0.1.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [admin\_username](#input\_admin\_username) | The username for the MemoryDB admin | `string` | `"admin"` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [auto\_minor\_version\_upgrade](#input\_auto\_minor\_version\_upgrade) | Indicates that minor engine upgrades will be applied automatically to the cluster during the maintenance window | `bool` | `true` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [engine\_version](#input\_engine\_version) | The version of the Redis engine to use | `string` | `"6.2"` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [maintenance\_window](#input\_maintenance\_window) | The weekly time range during which system maintenance can occur | `string` | `null` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [node\_type](#input\_node\_type) | The compute and memory capacity of the nodes in the cluster | `string` | `"db.r6g.large"` | no | +| [num\_replicas\_per\_shard](#input\_num\_replicas\_per\_shard) | The number of replicas per shard | `number` | `1` | no | +| [num\_shards](#input\_num\_shards) | The number of shards in the cluster | `number` | `1` | no | +| [parameter\_group\_family](#input\_parameter\_group\_family) | The name of the parameter group family | `string` | `"memorydb_redis6"` | no | +| [parameters](#input\_parameters) | Key-value mapping of parameters to apply to the parameter group | `map(string)` | `{}` | no | +| [port](#input\_port) | The port on which the cluster accepts connections | `number` | `6379` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [security\_group\_ids](#input\_security\_group\_ids) | List of security group IDs to associate with the MemoryDB cluster | `list(string)` | `[]` | no | +| [snapshot\_arns](#input\_snapshot\_arns) | List of ARNs for the snapshots to be restored. NOTE: destroys the existing cluster. Use for restoring. | `list(string)` | `[]` | no | +| [snapshot\_retention\_limit](#input\_snapshot\_retention\_limit) | The number of days for which MemoryDB retains automatic snapshots before deleting them | `number` | `null` | no | +| [snapshot\_window](#input\_snapshot\_window) | The daily time range during which MemoryDB begins taking daily snapshots | `string` | `null` | no | +| [sns\_topic\_arn](#input\_sns\_topic\_arn) | The ARN of the SNS topic to send notifications to | `string` | `null` | no | +| [ssm\_kms\_key\_id](#input\_ssm\_kms\_key\_id) | The KMS key ID to use for SSM parameter encryption. If not specified, the default key will be used. | `string` | `null` | no | +| [ssm\_parameter\_name](#input\_ssm\_parameter\_name) | The name of the SSM parameter to store the password in. If not specified, the password will be stored in `/{context.id}/admin_password` | `string` | `""` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [tls\_enabled](#input\_tls\_enabled) | Indicates whether Transport Layer Security (TLS) encryption is enabled for the cluster | `bool` | `true` | no | +| [vpc\_component\_name](#input\_vpc\_component\_name) | The name of the VPC component. This is used to pick out subnets for the MemoryDB cluster | `string` | `"vpc"` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [admin\_acl\_arn](#output\_admin\_acl\_arn) | The ARN of the MemoryDB user's ACL | +| [admin\_arn](#output\_admin\_arn) | The ARN of the MemoryDB user | +| [admin\_password\_ssm\_parameter\_name](#output\_admin\_password\_ssm\_parameter\_name) | The name of the SSM parameter storing the password for the MemoryDB user | +| [admin\_username](#output\_admin\_username) | The username for the MemoryDB user | +| [arn](#output\_arn) | The ARN of the MemoryDB cluster | +| [cluster\_endpoint](#output\_cluster\_endpoint) | The endpoint of the MemoryDB cluster | +| [engine\_patch\_version](#output\_engine\_patch\_version) | The Redis engine version | +| [id](#output\_id) | The name of the MemoryDB cluster | +| [parameter\_group\_arn](#output\_parameter\_group\_arn) | The ARN of the MemoryDB parameter group | +| [parameter\_group\_id](#output\_parameter\_group\_id) | The name of the MemoryDB parameter group | +| [shards](#output\_shards) | The shard details for the MemoryDB cluster | +| [subnet\_group\_arn](#output\_subnet\_group\_arn) | The ARN of the MemoryDB subnet group | +| [subnet\_group\_id](#output\_subnet\_group\_id) | The name of the MemoryDB subnet group | + + + +## References + +- [MemoryDB Documentation](https://docs.aws.amazon.com/memorydb/latest/devguide/what-is-memorydb.html) +- [Vector Searches with MemoryDB](https://docs.aws.amazon.com/memorydb/latest/devguide/vector-search.html) +- AWS CLI + [command to list MemoryDB engine versions](https://docs.aws.amazon.com/cli/latest/reference/memorydb/describe-engine-versions.html): + `aws memorydb describe-engine-versions`. + +[](https://cpco.io/component) diff --git a/modules/memorydb/context.tf b/modules/memorydb/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/memorydb/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/memorydb/main.tf b/modules/memorydb/main.tf new file mode 100644 index 000000000..30d93ad5c --- /dev/null +++ b/modules/memorydb/main.tf @@ -0,0 +1,37 @@ +locals { + vpc = module.vpc.outputs + private_subnet_ids = local.vpc.private_subnet_ids + + default_ssm_parameter_name = "/${module.this.id}/admin_password" + ssm_parameter_name = var.ssm_parameter_name == "" ? 
local.default_ssm_parameter_name : var.ssm_parameter_name +} + +module "memorydb" { + source = "cloudposse/memorydb/aws" + version = "0.1.0" + + node_type = var.node_type + num_shards = var.num_shards + num_replicas_per_shard = var.num_replicas_per_shard + tls_enabled = var.tls_enabled + engine_version = var.engine_version + auto_minor_version_upgrade = var.auto_minor_version_upgrade + subnet_ids = local.private_subnet_ids + security_group_ids = var.security_group_ids + port = var.port + maintenance_window = var.maintenance_window + + snapshot_window = var.snapshot_window + snapshot_retention_limit = var.snapshot_retention_limit + snapshot_arns = var.snapshot_arns + sns_topic_arn = var.sns_topic_arn + + admin_username = var.admin_username + + ssm_parameter_name = local.ssm_parameter_name + + parameter_group_family = var.parameter_group_family + parameters = var.parameters + + context = module.this.context +} diff --git a/modules/memorydb/outputs.tf b/modules/memorydb/outputs.tf new file mode 100644 index 000000000..383c1f818 --- /dev/null +++ b/modules/memorydb/outputs.tf @@ -0,0 +1,64 @@ +output "id" { + description = "The name of the MemoryDB cluster" + value = module.memorydb.id +} + +output "arn" { + description = "The ARN of the MemoryDB cluster" + value = module.memorydb.arn +} + +output "cluster_endpoint" { + description = "The endpoint of the MemoryDB cluster" + value = module.memorydb.cluster_endpoint +} + +output "engine_patch_version" { + description = "The Redis engine version" + value = module.memorydb.engine_patch_version +} + +output "parameter_group_id" { + description = "The name of the MemoryDB parameter group" + value = module.memorydb.id +} + +output "parameter_group_arn" { + description = "The ARN of the MemoryDB parameter group" + value = module.memorydb.arn +} + +output "subnet_group_id" { + description = "The name of the MemoryDB subnet group" + value = module.memorydb.id +} + +output "subnet_group_arn" { + description = "The ARN of the MemoryDB subnet group" + value = module.memorydb.arn +} + +output "shards" { + description = "The shard details for the MemoryDB cluster" + value = module.memorydb.shards +} + +output "admin_username" { + description = "The username for the MemoryDB user" + value = module.memorydb.admin_username +} + +output "admin_arn" { + description = "The ARN of the MemoryDB user" + value = module.memorydb.admin_arn +} + +output "admin_acl_arn" { + description = "The ARN of the MemoryDB user's ACL" + value = module.memorydb.admin_acl_arn +} + +output "admin_password_ssm_parameter_name" { + description = "The name of the SSM parameter storing the password for the MemoryDB user" + value = module.memorydb.admin_password_ssm_parameter_name +} diff --git a/modules/memorydb/providers.tf b/modules/memorydb/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/memorydb/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/memorydb/remote-state.tf b/modules/memorydb/remote-state.tf new file mode 100644 index 000000000..4e2391525 --- /dev/null +++ b/modules/memorydb/remote-state.tf @@ -0,0 +1,8 @@ +module "vpc" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.vpc_component_name + + context = module.this.context +} diff --git a/modules/memorydb/variables.tf b/modules/memorydb/variables.tf new file mode 100644 index 000000000..32515a6c6 --- /dev/null +++ b/modules/memorydb/variables.tf @@ -0,0 +1,132 @@ +variable "node_type" { + description = "The compute and memory capacity of the nodes in the cluster" + type = string + default = "db.r6g.large" + nullable = false +} + +variable "num_shards" { + description = "The number of shards in the cluster" + type = number + default = 1 + nullable = false +} + +variable "num_replicas_per_shard" { + description = "The number of replicas per shard" + type = number + default = 1 + nullable = false +} + +variable "tls_enabled" { + description = "Indicates whether Transport Layer Security (TLS) encryption is enabled for the cluster" + type = bool + default = true + nullable = false +} + +variable "engine_version" { + description = "The version of the Redis engine to use" + type = string + default = "6.2" + nullable = false +} + +variable "auto_minor_version_upgrade" { + description = "Indicates that minor engine upgrades will be applied automatically to the cluster during the maintenance window" + type = bool + default = true + nullable = false +} + +variable "security_group_ids" { + description = "List of security group IDs to associate with the MemoryDB cluster" + type = list(string) + default = [] + nullable = false +} + +variable "port" { + description = "The port on which the cluster accepts connections" + type = number + default = 6379 + nullable = false +} + +variable "maintenance_window" { + description = "The weekly time range during which system maintenance can occur" + type = string + default = null + nullable = true +} + +variable "snapshot_window" { + description = "The daily time range during which MemoryDB begins taking daily snapshots" + type = string + default = null + nullable = true +} + +variable "snapshot_retention_limit" { + description = "The number of days for which MemoryDB retains automatic snapshots before deleting them" + type = number + default = null + nullable = true +} + +variable "snapshot_arns" { + description = "List of ARNs for the snapshots to be restored. NOTE: destroys the existing cluster. Use for restoring." + type = list(string) + default = [] + nullable = false +} + +variable "admin_username" { + description = "The username for the MemoryDB admin" + type = string + default = "admin" + nullable = false +} + +variable "ssm_kms_key_id" { + description = "The KMS key ID to use for SSM parameter encryption. If not specified, the default key will be used." + type = string + default = null + nullable = true +} + +variable "ssm_parameter_name" { + description = "The name of the SSM parameter to store the password in. 
If not specified, the password will be stored in `/{context.id}/admin_password`" + type = string + default = "" + nullable = false +} + +variable "parameter_group_family" { + description = "The name of the parameter group family" + type = string + default = "memorydb_redis6" + nullable = false +} + +variable "parameters" { + description = "Key-value mapping of parameters to apply to the parameter group" + type = map(string) + default = {} + nullable = false +} + +variable "sns_topic_arn" { + description = "The ARN of the SNS topic to send notifications to" + type = string + default = null + nullable = true +} + +variable "vpc_component_name" { + description = "The name of the VPC component. This is used to pick out subnets for the MemoryDB cluster" + type = string + default = "vpc" + nullable = false +} diff --git a/modules/memorydb/versions.tf b/modules/memorydb/versions.tf new file mode 100644 index 000000000..6d7861d01 --- /dev/null +++ b/modules/memorydb/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 5.0" + } + } +} diff --git a/modules/mq-broker/README.md b/modules/mq-broker/README.md index fea5ff62d..56466728b 100644 --- a/modules/mq-broker/README.md +++ b/modules/mq-broker/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/mq-broker + - layer/unassigned + - provider/aws +--- + # Component: `mq-broker` This component is responsible for provisioning an AmazonMQ broker and corresponding security group. @@ -27,6 +34,7 @@ components: use_aws_owned_key: true ``` + ## Requirements @@ -35,8 +43,8 @@ components: | [terraform](#requirement\_terraform) | >= 0.13.0 | | [aws](#requirement\_aws) | >= 3.0 | | [local](#requirement\_local) | >= 1.3 | -| [template](#requirement\_template) | >= 2.0 | -| [utils](#requirement\_utils) | >= 0.3.0 | +| [template](#requirement\_template) | >= 2.2 | +| [utils](#requirement\_utils) | >= 1.10.0 | ## Providers @@ -46,11 +54,11 @@ No providers. | Name | Source | Version | |------|--------|---------| -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 0.17.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [mq\_broker](#module\_mq\_broker) | cloudposse/mq-broker/aws | 0.14.0 | | [this](#module\_this) | cloudposse/label/null | 0.24.1 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 0.17.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -79,7 +87,6 @@ No resources. | [general\_log\_enabled](#input\_general\_log\_enabled) | Enables general logging via CloudWatch | `bool` | `true` | no | | [host\_instance\_type](#input\_host\_instance\_type) | The broker's instance type. e.g. mq.t2.micro or mq.m4.large | `string` | `"mq.t3.micro"` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for default, which is `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | | [kms\_mq\_key\_arn](#input\_kms\_mq\_key\_arn) | ARN of the AWS KMS key used for Amazon MQ encryption | `string` | `null` | no | | [kms\_ssm\_key\_arn](#input\_kms\_ssm\_key\_arn) | ARN of the AWS KMS key used for SSM encryption | `string` | `"alias/aws/ssm"` | no | | [label\_key\_case](#input\_label\_key\_case) | The letter case of label keys (`tag` names) (i.e. `name`, `namespace`, `environment`, `stage`, `attributes`) to use in `tags`.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | @@ -126,10 +133,11 @@ No resources. | [secondary\_stomp\_ssl\_endpoint](#output\_secondary\_stomp\_ssl\_endpoint) | AmazonMQ secondary STOMP+SSL endpoint | | [secondary\_wss\_endpoint](#output\_secondary\_wss\_endpoint) | AmazonMQ secondary WSS endpoint | - + ## References - * [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/mq-broker) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/mq-broker) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/mq-broker/default.auto.tfvars b/modules/mq-broker/default.auto.tfvars deleted file mode 100755 index 153d814a7..000000000 --- a/modules/mq-broker/default.auto.tfvars +++ /dev/null @@ -1,5 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false - -name = "mq-broker" diff --git a/modules/mq-broker/main.tf b/modules/mq-broker/main.tf old mode 100755 new mode 100644 diff --git a/modules/mq-broker/outputs.tf b/modules/mq-broker/outputs.tf old mode 100755 new mode 100644 diff --git a/modules/mq-broker/providers.tf b/modules/mq-broker/providers.tf old mode 100755 new mode 100644 index eb5dcb247..ef923e10a --- a/modules/mq-broker/providers.tf +++ b/modules/mq-broker/providers.tf @@ -1,17 +1,19 @@ provider "aws" { region = var.region - # `terraform import` will not use data from a data source, so on import we have to explicitly specify the profile - profile = coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
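+    # (Illustrative note, not upstream text.) compact() drops a null or empty ARN from the list, so
+    # for_each produces zero or one assume_role blocks: a role is assumed only when an ARN is set.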
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } } module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} diff --git a/modules/mq-broker/remote-state.tf b/modules/mq-broker/remote-state.tf index 82d7050f1..cca23e913 100644 --- a/modules/mq-broker/remote-state.tf +++ b/modules/mq-broker/remote-state.tf @@ -1,6 +1,6 @@ module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.17.0" + version = "1.5.0" stack_config_local_path = "../../../stacks" component = "vpc" @@ -10,7 +10,7 @@ module "vpc" { module "eks" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.17.0" + version = "1.5.0" stack_config_local_path = "../../../stacks" component = "eks" diff --git a/modules/mq-broker/variables.tf b/modules/mq-broker/variables.tf old mode 100755 new mode 100644 diff --git a/modules/mq-broker/versions.tf b/modules/mq-broker/versions.tf old mode 100755 new mode 100644 index 42b56538d..23548707e --- a/modules/mq-broker/versions.tf +++ b/modules/mq-broker/versions.tf @@ -7,8 +7,8 @@ terraform { version = ">= 3.0" } template = { - source = "hashicorp/template" - version = ">= 2.0" + source = "cloudposse/template" + version = ">= 2.2" } local = { source = "hashicorp/local" @@ -16,7 +16,7 @@ terraform { } utils = { source = "cloudposse/utils" - version = ">= 0.3.0" + version = ">= 1.10.0" } } } diff --git a/modules/msk/README.md b/modules/msk/README.md new file mode 100644 index 000000000..e18148bd9 --- /dev/null +++ b/modules/msk/README.md @@ -0,0 +1,224 @@ +--- +tags: + - component/msk + - layer/unassigned + - provider/aws +--- + +# Component: `msk` + +This component is responsible for provisioning [Amazon Managed Streaming](https://aws.amazon.com/msk/) clusters for +[Apache Kafka](https://aws.amazon.com/msk/what-is-kafka/). + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. + +```yaml +components: + terraform: + msk: + metadata: + component: "msk" + vars: + enabled: true + name: "msk" + vpc_component_name: "vpc" + dns_delegated_component_name: "dns-delegated" + dns_delegated_environment_name: "gbl" + # https://docs.aws.amazon.com/msk/latest/developerguide/supported-kafka-versions.html + kafka_version: "3.4.0" + public_access_enabled: false + # https://aws.amazon.com/msk/pricing/ + broker_instance_type: "kafka.m5.large" + # Number of brokers per AZ + broker_per_zone: 1 + # `broker_dns_records_count` specifies how many DNS records to create for the broker endpoints in the DNS zone provided in the `zone_id` variable. + # This corresponds to the total number of broker endpoints created by the module. + # Calculate this number by multiplying the `broker_per_zone` variable by the subnet count. 
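+        # For example (assuming the VPC spans 3 subnets, one per AZ): broker_per_zone = 1 x 3 subnets = 3 DNS records.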
+ broker_dns_records_count: 3 + broker_volume_size: 500 + client_broker: "TLS_PLAINTEXT" + encryption_in_cluster: true + encryption_at_rest_kms_key_arn: "" + enhanced_monitoring: "DEFAULT" + certificate_authority_arns: [] + + # Authentication methods + client_allow_unauthenticated: true + client_sasl_scram_enabled: false + client_sasl_scram_secret_association_enabled: false + client_sasl_scram_secret_association_arns: [] + client_sasl_iam_enabled: false + client_tls_auth_enabled: false + + jmx_exporter_enabled: false + node_exporter_enabled: false + cloudwatch_logs_enabled: false + firehose_logs_enabled: false + firehose_delivery_stream: "" + s3_logs_enabled: false + s3_logs_bucket: "" + s3_logs_prefix: "" + properties: {} + autoscaling_enabled: true + storage_autoscaling_target_value: 60 + storage_autoscaling_max_capacity: null + storage_autoscaling_disable_scale_in: false + create_security_group: true + security_group_rule_description: "Allow inbound %s traffic" + # A list of IDs of Security Groups to allow access to the cluster security group + allowed_security_group_ids: [] + # A list of IPv4 CIDRs to allow access to the cluster security group + allowed_cidr_blocks: [] +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [dns\_delegated](#module\_dns\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [kafka](#module\_kafka) | cloudposse/msk-apache-kafka-cluster/aws | 2.3.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_security\_group\_rules](#input\_additional\_security\_group\_rules) | A list of Security Group rule objects to add to the created security group, in addition to the ones
this module normally creates. (To suppress the module's rules, set `create_security_group` to false
and supply your own security group(s) via `associated_security_group_ids`.)
The keys and values of the objects are fully compatible with the `aws_security_group_rule` resource, except
for `security_group_id` which will be ignored, and the optional "key" which, if provided, must be unique and known at "plan" time.
For more info see https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule
and https://github.com/cloudposse/terraform-aws-security-group. | `list(any)` | `[]` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [allow\_all\_egress](#input\_allow\_all\_egress) | If `true`, the created security group will allow egress on all ports and protocols to all IP addresses.
If this is false and no egress rules are otherwise specified, then no egress will be allowed. | `bool` | `true` | no | +| [allowed\_cidr\_blocks](#input\_allowed\_cidr\_blocks) | A list of IPv4 CIDRs to allow access to the security group created by this module.
The length of this list must be known at "plan" time. | `list(string)` | `[]` | no | +| [allowed\_security\_group\_ids](#input\_allowed\_security\_group\_ids) | A list of IDs of Security Groups to allow access to the security group created by this module.
The length of this list must be known at "plan" time. | `list(string)` | `[]` | no | +| [associated\_security\_group\_ids](#input\_associated\_security\_group\_ids) | A list of IDs of Security Groups to associate the created resource with, in addition to the created security group.
These security groups will not be modified and, if `create_security_group` is `false`, must have rules providing the desired access. | `list(string)` | `[]` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [autoscaling\_enabled](#input\_autoscaling\_enabled) | To automatically expand your cluster's storage in response to increased usage, you can enable this. [More info](https://docs.aws.amazon.com/msk/latest/developerguide/msk-autoexpand.html) | `bool` | `true` | no | +| [broker\_dns\_records\_count](#input\_broker\_dns\_records\_count) | This variable specifies how many DNS records to create for the broker endpoints in the DNS zone provided in the `zone_id` variable.
This corresponds to the total number of broker endpoints created by the module.
Calculate this number by multiplying the `broker_per_zone` variable by the subnet count.
This variable is necessary to prevent the Terraform error:
The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. | `number` | `0` | no | +| [broker\_instance\_type](#input\_broker\_instance\_type) | The instance type to use for the Kafka brokers | `string` | n/a | yes | +| [broker\_per\_zone](#input\_broker\_per\_zone) | Number of Kafka brokers per zone | `number` | `1` | no | +| [broker\_volume\_size](#input\_broker\_volume\_size) | The size in GiB of the EBS volume for the data drive on each broker node | `number` | `1000` | no | +| [certificate\_authority\_arns](#input\_certificate\_authority\_arns) | List of ACM Certificate Authority Amazon Resource Names (ARNs) to be used for TLS client authentication | `list(string)` | `[]` | no | +| [client\_allow\_unauthenticated](#input\_client\_allow\_unauthenticated) | Enable unauthenticated access | `bool` | `false` | no | +| [client\_broker](#input\_client\_broker) | Encryption setting for data in transit between clients and brokers. Valid values: `TLS`, `TLS_PLAINTEXT`, and `PLAINTEXT` | `string` | `"TLS"` | no | +| [client\_sasl\_iam\_enabled](#input\_client\_sasl\_iam\_enabled) | Enable client authentication via IAM policies. Cannot be set to `true` at the same time as `client_tls_auth_enabled` | `bool` | `false` | no | +| [client\_sasl\_scram\_enabled](#input\_client\_sasl\_scram\_enabled) | Enable SCRAM client authentication via AWS Secrets Manager. Cannot be set to `true` at the same time as `client_tls_auth_enabled` | `bool` | `false` | no | +| [client\_sasl\_scram\_secret\_association\_arns](#input\_client\_sasl\_scram\_secret\_association\_arns) | List of AWS Secrets Manager secret ARNs for SCRAM authentication | `list(string)` | `[]` | no | +| [client\_sasl\_scram\_secret\_association\_enabled](#input\_client\_sasl\_scram\_secret\_association\_enabled) | Enable the list of AWS Secrets Manager secret ARNs for SCRAM authentication | `bool` | `true` | no | +| [client\_tls\_auth\_enabled](#input\_client\_tls\_auth\_enabled) | Set `true` to enable the Client TLS Authentication | `bool` | `false` | no | +| [cloudwatch\_logs\_enabled](#input\_cloudwatch\_logs\_enabled) | Indicates whether you want to enable or disable streaming broker logs to Cloudwatch Logs | `bool` | `false` | no | +| [cloudwatch\_logs\_log\_group](#input\_cloudwatch\_logs\_log\_group) | Name of the Cloudwatch Log Group to deliver logs to | `string` | `null` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [create\_security\_group](#input\_create\_security\_group) | Set `true` to create and configure a new security group. If false, `associated_security_group_ids` must be provided. | `bool` | `true` | no | +| [custom\_broker\_dns\_name](#input\_custom\_broker\_dns\_name) | Custom Route53 DNS hostname for MSK brokers. Use `%%ID%%` key to specify brokers index in the hostname. Example: `kafka-broker%%ID%%.example.com` | `string` | `null` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to the `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [dns\_delegated\_component\_name](#input\_dns\_delegated\_component\_name) | The component name of `dns-delegated` | `string` | `"dns-delegated"` | no | +| [dns\_delegated\_environment\_name](#input\_dns\_delegated\_environment\_name) | The environment name of `dns-delegated` | `string` | `"gbl"` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [encryption\_at\_rest\_kms\_key\_arn](#input\_encryption\_at\_rest\_kms\_key\_arn) | You may specify a KMS key short ID or ARN (it will always output an ARN) to use for encrypting your data at rest | `string` | `""` | no | +| [encryption\_in\_cluster](#input\_encryption\_in\_cluster) | Whether data communication among broker nodes is encrypted | `bool` | `true` | no | +| [enhanced\_monitoring](#input\_enhanced\_monitoring) | Specify the desired enhanced MSK CloudWatch monitoring level. Valid values: `DEFAULT`, `PER_BROKER`, and `PER_TOPIC_PER_BROKER` | `string` | `"DEFAULT"` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [firehose\_delivery\_stream](#input\_firehose\_delivery\_stream) | Name of the Kinesis Data Firehose delivery stream to deliver logs to | `string` | `""` | no | +| [firehose\_logs\_enabled](#input\_firehose\_logs\_enabled) | Indicates whether you want to enable or disable streaming broker logs to Kinesis Data Firehose | `bool` | `false` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [inline\_rules\_enabled](#input\_inline\_rules\_enabled) | NOT RECOMMENDED. Create rules "inline" instead of as separate `aws_security_group_rule` resources.
See [#20046](https://github.com/hashicorp/terraform-provider-aws/issues/20046) for one of several issues with inline rules.
See [this post](https://github.com/hashicorp/terraform-provider-aws/pull/9032#issuecomment-639545250) for details on the difference between inline rules and rule resources. | `bool` | `false` | no | +| [jmx\_exporter\_enabled](#input\_jmx\_exporter\_enabled) | Set `true` to enable the JMX Exporter | `bool` | `false` | no | +| [kafka\_version](#input\_kafka\_version) | The desired Kafka software version.
Refer to https://docs.aws.amazon.com/msk/latest/developerguide/supported-kafka-versions.html for more details | `string` | n/a | yes | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [node\_exporter\_enabled](#input\_node\_exporter\_enabled) | Set `true` to enable the Node Exporter | `bool` | `false` | no | +| [preserve\_security\_group\_id](#input\_preserve\_security\_group\_id) | When `false` and `security_group_create_before_destroy` is `true`, changes to security group rules
cause a new security group to be created with the new rules, and the existing security group is then
replaced with the new one, eliminating any service interruption.
When `true` or when changing the value (from `false` to `true` or from `true` to `false`),
existing security group rules will be deleted before new ones are created, resulting in a service interruption,
but preserving the security group itself.
**NOTE:** Setting this to `true` does not guarantee the security group will never be replaced,
it only keeps changes to the security group rules from triggering a replacement.
See the [terraform-aws-security-group README](https://github.com/cloudposse/terraform-aws-security-group) for further discussion. | `bool` | `false` | no | +| [properties](#input\_properties) | Contents of the server.properties file. Supported properties are documented in the [MSK Developer Guide](https://docs.aws.amazon.com/msk/latest/developerguide/msk-configuration-properties.html) | `map(string)` | `{}` | no | +| [public\_access\_enabled](#input\_public\_access\_enabled) | Enable public access to MSK cluster (given that all of the requirements are met) | `bool` | `false` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS region | `string` | n/a | yes | +| [s3\_logs\_bucket](#input\_s3\_logs\_bucket) | Name of the S3 bucket to deliver logs to | `string` | `""` | no | +| [s3\_logs\_enabled](#input\_s3\_logs\_enabled) | Indicates whether you want to enable or disable streaming broker logs to S3 | `bool` | `false` | no | +| [s3\_logs\_prefix](#input\_s3\_logs\_prefix) | Prefix to append to the S3 folder name logs are delivered to | `string` | `""` | no | +| [security\_group\_create\_before\_destroy](#input\_security\_group\_create\_before\_destroy) | Set `true` to enable terraform `create_before_destroy` behavior on the created security group.
We only recommend setting this `false` if you are importing an existing security group
that you do not want replaced and therefore need full control over its name.
Note that changing this value will always cause the security group to be replaced. | `bool` | `true` | no | +| [security\_group\_create\_timeout](#input\_security\_group\_create\_timeout) | How long to wait for the security group to be created. | `string` | `"10m"` | no | +| [security\_group\_delete\_timeout](#input\_security\_group\_delete\_timeout) | How long to retry on `DependencyViolation` errors during security group deletion from
lingering ENIs left by certain AWS services such as Elastic Load Balancing. | `string` | `"15m"` | no | +| [security\_group\_description](#input\_security\_group\_description) | The description to assign to the created Security Group.
Warning: Changing the description causes the security group to be replaced. | `string` | `"Managed by Terraform"` | no | +| [security\_group\_name](#input\_security\_group\_name) | The name to assign to the created security group. Must be unique within the VPC.
If not provided, will be derived from the `null-label.context` passed in.
If `create_before_destroy` is true, will be used as a name prefix. | `list(string)` | `[]` | no | +| [security\_group\_rule\_description](#input\_security\_group\_rule\_description) | The description to place on each security group rule. The %s will be replaced with the protocol name | `string` | `"Allow inbound %s traffic"` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [storage\_autoscaling\_disable\_scale\_in](#input\_storage\_autoscaling\_disable\_scale\_in) | If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the scalable resource | `bool` | `false` | no | +| [storage\_autoscaling\_max\_capacity](#input\_storage\_autoscaling\_max\_capacity) | Maximum size the autoscaling policy can scale storage. Defaults to `broker_volume_size` | `number` | `null` | no | +| [storage\_autoscaling\_target\_value](#input\_storage\_autoscaling\_target\_value) | Percentage of storage used to trigger autoscaled storage increase | `number` | `60` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [vpc\_component\_name](#input\_vpc\_component\_name) | The name of the Atmos VPC component | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [bootstrap\_brokers](#output\_bootstrap\_brokers) | Comma separated list of one or more hostname:port pairs of Kafka brokers suitable to bootstrap connectivity to the Kafka cluster | +| [bootstrap\_brokers\_public\_sasl\_iam](#output\_bootstrap\_brokers\_public\_sasl\_iam) | Comma separated list of one or more DNS names (or IP addresses) and SASL IAM port pairs for public access to the Kafka cluster using SASL/IAM | +| [bootstrap\_brokers\_public\_sasl\_scram](#output\_bootstrap\_brokers\_public\_sasl\_scram) | Comma separated list of one or more DNS names (or IP addresses) and SASL SCRAM port pairs for public access to the Kafka cluster using SASL/SCRAM | +| [bootstrap\_brokers\_public\_tls](#output\_bootstrap\_brokers\_public\_tls) | Comma separated list of one or more DNS names (or IP addresses) and TLS port pairs for public access to the Kafka cluster using TLS | +| [bootstrap\_brokers\_sasl\_iam](#output\_bootstrap\_brokers\_sasl\_iam) | Comma separated list of one or more DNS names (or IP addresses) and SASL IAM port pairs for access to the Kafka cluster using SASL/IAM | +| [bootstrap\_brokers\_sasl\_scram](#output\_bootstrap\_brokers\_sasl\_scram) | Comma separated list of one or more DNS names (or IP addresses) and SASL SCRAM port pairs for access to the Kafka cluster using SASL/SCRAM | +| [bootstrap\_brokers\_tls](#output\_bootstrap\_brokers\_tls) | Comma separated list of one or more DNS names (or IP addresses) and TLS port pairs for access to the Kafka cluster using TLS | +| [broker\_endpoints](#output\_broker\_endpoints) | List of broker endpoints | +| [cluster\_arn](#output\_cluster\_arn) | Amazon Resource Name (ARN) of the MSK cluster | +| [cluster\_name](#output\_cluster\_name) | The cluster name of the MSK cluster | +| [config\_arn](#output\_config\_arn) | Amazon Resource Name (ARN) of the MSK configuration | +| [current\_version](#output\_current\_version) | Current version of the MSK Cluster | +| [hostnames](#output\_hostnames) | List of MSK Cluster broker DNS hostnames | +| [latest\_revision](#output\_latest\_revision) | Latest revision of the MSK configuration | +| [security\_group\_arn](#output\_security\_group\_arn) | The ARN of the created security group | +| [security\_group\_id](#output\_security\_group\_id) | The ID of the created security group | +| [security\_group\_name](#output\_security\_group\_name) | The name of the created security group | +| [storage\_mode](#output\_storage\_mode) | Storage mode for supported storage tiers | +| [zookeeper\_connect\_string](#output\_zookeeper\_connect\_string) | Comma separated list of one or more hostname:port pairs to connect to the Apache Zookeeper cluster | +| [zookeeper\_connect\_string\_tls](#output\_zookeeper\_connect\_string\_tls) | Comma separated list of one or more hostname:port pairs to connect to the Apache Zookeeper cluster via TLS | + + + +## References + +- https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster +- 
https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_serverless_cluster +- https://aws.amazon.com/blogs/big-data/securing-apache-kafka-is-easy-and-familiar-with-iam-access-control-for-amazon-msk/ +- https://docs.aws.amazon.com/msk/latest/developerguide/security-iam.html +- https://docs.aws.amazon.com/msk/latest/developerguide/iam-access-control.html +- https://docs.aws.amazon.com/msk/latest/developerguide/kafka_apis_iam.html +- https://github.com/aws/aws-msk-iam-auth +- https://www.cloudthat.com/resources/blog/a-guide-to-create-aws-msk-cluster-with-iam-based-authentication +- https://blog.devops.dev/how-to-use-iam-auth-with-aws-msk-a-step-by-step-guide-2023-eb8291781fcb +- https://www.kai-waehner.de/blog/2022/08/30/when-not-to-choose-amazon-msk-serverless-for-apache-kafka/ +- https://stackoverflow.com/questions/72508438/connect-python-to-msk-with-iam-role-based-authentication +- https://github.com/aws/aws-msk-iam-auth/issues/10 +- https://aws.amazon.com/msk/faqs/ +- https://aws.amazon.com/blogs/big-data/secure-connectivity-patterns-to-access-amazon-msk-across-aws-regions/ +- https://docs.aws.amazon.com/msk/latest/developerguide/client-access.html +- https://repost.aws/knowledge-center/msk-broker-custom-ports + +[](https://cpco.io/component) diff --git a/modules/msk/context.tf b/modules/msk/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/msk/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
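# As a minimal illustration of the pattern described above, other resources and
# modules in a component can consume the computed label values directly, e.g.:
#
#   tags = module.this.tags   # the generated tag map
#   name = module.this.id     # the fully rendered ID string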
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/msk/main.tf b/modules/msk/main.tf new file mode 100644 index 000000000..6f538a990 --- /dev/null +++ b/modules/msk/main.tf @@ -0,0 +1,68 @@ +locals { + enabled = module.this.enabled + vpc_outputs = module.vpc.outputs +} + +module "kafka" { + source = "cloudposse/msk-apache-kafka-cluster/aws" + version = "2.3.0" + + # VPC and subnets + vpc_id = local.vpc_outputs.vpc_id + subnet_ids = local.vpc_outputs.private_subnet_ids + + # Cluster config + kafka_version = var.kafka_version + broker_per_zone = var.broker_per_zone + broker_instance_type = var.broker_instance_type + broker_volume_size = var.broker_volume_size + client_broker = var.client_broker + encryption_in_cluster = var.encryption_in_cluster + encryption_at_rest_kms_key_arn = var.encryption_at_rest_kms_key_arn + enhanced_monitoring = var.enhanced_monitoring + certificate_authority_arns = var.certificate_authority_arns + client_allow_unauthenticated = var.client_allow_unauthenticated + client_sasl_scram_enabled = var.client_sasl_scram_enabled + client_sasl_scram_secret_association_enabled = var.client_sasl_scram_secret_association_enabled + client_sasl_scram_secret_association_arns = var.client_sasl_scram_secret_association_arns + client_sasl_iam_enabled = var.client_sasl_iam_enabled + client_tls_auth_enabled = var.client_tls_auth_enabled + jmx_exporter_enabled = var.jmx_exporter_enabled + node_exporter_enabled = var.node_exporter_enabled + cloudwatch_logs_enabled = var.cloudwatch_logs_enabled + cloudwatch_logs_log_group = var.cloudwatch_logs_log_group + firehose_logs_enabled = var.firehose_logs_enabled + firehose_delivery_stream = var.firehose_delivery_stream + s3_logs_enabled = var.s3_logs_enabled + s3_logs_bucket = var.s3_logs_bucket + s3_logs_prefix = var.s3_logs_prefix + properties = var.properties + autoscaling_enabled = var.autoscaling_enabled + storage_autoscaling_target_value = var.storage_autoscaling_target_value + storage_autoscaling_max_capacity = var.storage_autoscaling_max_capacity + storage_autoscaling_disable_scale_in = 
var.storage_autoscaling_disable_scale_in + security_group_rule_description = var.security_group_rule_description + public_access_enabled = var.public_access_enabled + + # DNS hostname records + zone_id = module.dns_delegated.outputs.default_dns_zone_id + broker_dns_records_count = var.broker_dns_records_count + custom_broker_dns_name = var.custom_broker_dns_name + + # Cluster Security Group + allowed_security_group_ids = var.allowed_security_group_ids + allowed_cidr_blocks = var.allowed_cidr_blocks + associated_security_group_ids = var.associated_security_group_ids + create_security_group = var.create_security_group + security_group_name = var.security_group_name + security_group_description = var.security_group_description + security_group_create_before_destroy = var.security_group_create_before_destroy + preserve_security_group_id = var.preserve_security_group_id + security_group_create_timeout = var.security_group_create_timeout + security_group_delete_timeout = var.security_group_delete_timeout + allow_all_egress = var.allow_all_egress + additional_security_group_rules = var.additional_security_group_rules + inline_rules_enabled = var.inline_rules_enabled + + context = module.this.context +} diff --git a/modules/msk/outputs.tf b/modules/msk/outputs.tf new file mode 100644 index 000000000..ffa71a1e5 --- /dev/null +++ b/modules/msk/outputs.tf @@ -0,0 +1,99 @@ +output "cluster_name" { + value = module.kafka.cluster_name + description = "The cluster name of the MSK cluster" +} + +output "cluster_arn" { + value = module.kafka.cluster_arn + description = "Amazon Resource Name (ARN) of the MSK cluster" +} + +output "storage_mode" { + value = module.kafka.storage_mode + description = "Storage mode for supported storage tiers" +} + +output "bootstrap_brokers" { + value = module.kafka.bootstrap_brokers + description = "Comma separated list of one or more hostname:port pairs of Kafka brokers suitable to bootstrap connectivity to the Kafka cluster" +} + +output "bootstrap_brokers_tls" { + value = module.kafka.bootstrap_brokers_tls + description = "Comma separated list of one or more DNS names (or IP addresses) and TLS port pairs for access to the Kafka cluster using TLS" +} + +output "bootstrap_brokers_public_tls" { + value = module.kafka.bootstrap_brokers_public_tls + description = "Comma separated list of one or more DNS names (or IP addresses) and TLS port pairs for public access to the Kafka cluster using TLS" +} + +output "bootstrap_brokers_sasl_scram" { + value = module.kafka.bootstrap_brokers_sasl_scram + description = "Comma separated list of one or more DNS names (or IP addresses) and SASL SCRAM port pairs for access to the Kafka cluster using SASL/SCRAM" +} + +output "bootstrap_brokers_public_sasl_scram" { + value = module.kafka.bootstrap_brokers_public_sasl_scram + description = "Comma separated list of one or more DNS names (or IP addresses) and SASL SCRAM port pairs for public access to the Kafka cluster using SASL/SCRAM" +} + +output "bootstrap_brokers_sasl_iam" { + value = module.kafka.bootstrap_brokers_sasl_iam + description = "Comma separated list of one or more DNS names (or IP addresses) and SASL IAM port pairs for access to the Kafka cluster using SASL/IAM" +} + +output "bootstrap_brokers_public_sasl_iam" { + value = module.kafka.bootstrap_brokers_public_sasl_iam + description = "Comma separated list of one or more DNS names (or IP addresses) and SASL IAM port pairs for public access to the Kafka cluster using SASL/IAM" +} + +output "zookeeper_connect_string" { + value = 
module.kafka.zookeeper_connect_string + description = "Comma separated list of one or more hostname:port pairs to connect to the Apache Zookeeper cluster" +} + +output "zookeeper_connect_string_tls" { + value = module.kafka.zookeeper_connect_string_tls + description = "Comma separated list of one or more hostname:port pairs to connect to the Apache Zookeeper cluster via TLS" +} + +output "broker_endpoints" { + value = module.kafka.broker_endpoints + description = "List of broker endpoints" +} + +output "current_version" { + value = module.kafka.current_version + description = "Current version of the MSK Cluster" +} + +output "config_arn" { + value = module.kafka.config_arn + description = "Amazon Resource Name (ARN) of the MSK configuration" +} + +output "latest_revision" { + value = module.kafka.latest_revision + description = "Latest revision of the MSK configuration" +} + +output "hostnames" { + value = module.kafka.hostnames + description = "List of MSK Cluster broker DNS hostnames" +} + +output "security_group_id" { + value = module.kafka.security_group_id + description = "The ID of the created security group" +} + +output "security_group_arn" { + value = module.kafka.security_group_arn + description = "The ARN of the created security group" +} + +output "security_group_name" { + value = module.kafka.security_group_name + description = "The name of the created security group" +} diff --git a/modules/msk/providers.tf b/modules/msk/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/msk/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/msk/remote-state.tf b/modules/msk/remote-state.tf new file mode 100644 index 000000000..26780aa76 --- /dev/null +++ b/modules/msk/remote-state.tf @@ -0,0 +1,18 @@ +module "vpc" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.vpc_component_name + + context = module.this.context +} + +module "dns_delegated" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.dns_delegated_component_name + environment = var.dns_delegated_environment_name + + context = module.this.context +} diff --git a/modules/msk/security-group-variables.tf b/modules/msk/security-group-variables.tf new file mode 100644 index 000000000..1b6690947 --- /dev/null +++ b/modules/msk/security-group-variables.tf @@ -0,0 +1,184 @@ +# security-group-variables Version: 3 +# +# Copy this file from https://github.com/cloudposse/terraform-aws-security-group/blob/master/exports/security-group-variables.tf +# and EDIT IT TO SUIT YOUR PROJECT. Update the version number above if you update this file from a later version. +# Unlike null-label context.tf, this file cannot be automatically updated +# because of the tight integration with the module using it. 
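# Assuming GitHub's standard raw-content URL layout, the file can be fetched for
# editing with a command along these lines:
#
#   curl -sL https://raw.githubusercontent.com/cloudposse/terraform-aws-security-group/master/exports/security-group-variables.tf -o security-group-variables.tf
#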
+## +# Delete this top comment block, except for the first line (version number), +# REMOVE COMMENTS below that are intended for the initial implementer and not maintainers or end users. +# +# This file provides the standard inputs that all Cloud Posse Open Source +# Terraform module that create AWS Security Groups should implement. +# This file does NOT provide implementation of the inputs, as that +# of course varies with each module. +# +# This file declares some standard outputs modules should create, +# but the declarations should be moved to `outputs.tf` and of course +# may need to be modified based on the module's use of security-group. +# + + +variable "create_security_group" { + type = bool + description = "Set `true` to create and configure a new security group. If false, `associated_security_group_ids` must be provided." + default = true +} + +variable "associated_security_group_ids" { + type = list(string) + description = <<-EOT + A list of IDs of Security Groups to associate the created resource with, in addition to the created security group. + These security groups will not be modified and, if `create_security_group` is `false`, must have rules providing the desired access. + EOT + default = [] +} + +## +## allowed_* inputs are optional, because the same thing can be accomplished by +## providing `additional_security_group_rules`. However, if the rules this +## module creates are non-trivial (for example, opening ports based on +## feature settings, see https://github.com/cloudposse/terraform-aws-msk-apache-kafka-cluster/blob/3fe23c402cc420799ae721186812482335f78d24/main.tf#L14-L53 ) +## then it makes sense to include these. +## Reasons not to include some or all of these inputs include +## - too hard to implement +## - does not make sense (particularly the IPv6 inputs if the underlying resource does not yet support IPv6) +## - likely to confuse users +## - likely to invite count/for_each issues +variable "allowed_security_group_ids" { + type = list(string) + description = <<-EOT + A list of IDs of Security Groups to allow access to the security group created by this module. + The length of this list must be known at "plan" time. + EOT + default = [] +} + +variable "allowed_cidr_blocks" { + type = list(string) + description = <<-EOT + A list of IPv4 CIDRs to allow access to the security group created by this module. + The length of this list must be known at "plan" time. + EOT + default = [] +} +## End of optional allowed_* ########### + +variable "security_group_name" { + type = list(string) + description = <<-EOT + The name to assign to the created security group. Must be unique within the VPC. + If not provided, will be derived from the `null-label.context` passed in. + If `create_before_destroy` is true, will be used as a name prefix. + EOT + default = [] +} + +variable "security_group_description" { + type = string + description = <<-EOT + The description to assign to the created Security Group. + Warning: Changing the description causes the security group to be replaced. + EOT + default = "Managed by Terraform" +} + +variable "security_group_create_before_destroy" { + type = bool + description = <<-EOT + Set `true` to enable terraform `create_before_destroy` behavior on the created security group. + We only recommend setting this `false` if you are importing an existing security group + that you do not want replaced and therefore need full control over its name. + Note that changing this value will always cause the security group to be replaced. 
+ EOT + default = true +} + +variable "preserve_security_group_id" { + type = bool + description = <<-EOT + When `false` and `security_group_create_before_destroy` is `true`, changes to security group rules + cause a new security group to be created with the new rules, and the existing security group is then + replaced with the new one, eliminating any service interruption. + When `true` or when changing the value (from `false` to `true` or from `true` to `false`), + existing security group rules will be deleted before new ones are created, resulting in a service interruption, + but preserving the security group itself. + **NOTE:** Setting this to `true` does not guarantee the security group will never be replaced, + it only keeps changes to the security group rules from triggering a replacement. + See the [terraform-aws-security-group README](https://github.com/cloudposse/terraform-aws-security-group) for further discussion. + EOT + default = false +} + +variable "security_group_create_timeout" { + type = string + description = "How long to wait for the security group to be created." + default = "10m" +} + +variable "security_group_delete_timeout" { + type = string + description = <<-EOT + How long to retry on `DependencyViolation` errors during security group deletion from + lingering ENIs left by certain AWS services such as Elastic Load Balancing. + EOT + default = "15m" +} + +variable "allow_all_egress" { + type = bool + description = <<-EOT + If `true`, the created security group will allow egress on all ports and protocols to all IP addresses. + If this is false and no egress rules are otherwise specified, then no egress will be allowed. + EOT + default = true +} + +variable "additional_security_group_rules" { + type = list(any) + description = <<-EOT + A list of Security Group rule objects to add to the created security group, in addition to the ones + this module normally creates. (To suppress the module's rules, set `create_security_group` to false + and supply your own security group(s) via `associated_security_group_ids`.) + The keys and values of the objects are fully compatible with the `aws_security_group_rule` resource, except + for `security_group_id` which will be ignored, and the optional "key" which, if provided, must be unique and known at "plan" time. + For more info see https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule + and https://github.com/cloudposse/terraform-aws-security-group. + EOT + default = [] +} + +#### We do not expose an `additional_security_group_rule_matrix` input for a few reasons: +# - It is a convenience and ultimately provides no rules that cannot be provided via `additional_security_group_rules` +# - It is complicated and can, in some situations, create problems for Terraform `for_each` +# - It is difficult to document and easy to make mistakes using it + + +# +# +#### The variables below (but not the outputs) can be omitted if not needed, and may need their descriptions modified +# +# + +############################################################################################# +## Special note about inline_rules_enabled and revoke_rules_on_delete +## +## The security-group inputs inline_rules_enabled and revoke_rules_on_delete should not +## be exposed in other modules unless there is a strong reason for them to be used. +## We discourage the use of inline_rules_enabled and we rarely need or want +## revoke_rules_on_delete, so we do not want to clutter our interface with those inputs. 
+## +## If someone wants to enable either of those options, they have the option +## of creating a security group configured as they like +## and passing it in as the target security group. +############################################################################################# + +variable "inline_rules_enabled" { + type = bool + description = <<-EOT + NOT RECOMMENDED. Create rules "inline" instead of as separate `aws_security_group_rule` resources. + See [#20046](https://github.com/hashicorp/terraform-provider-aws/issues/20046) for one of several issues with inline rules. + See [this post](https://github.com/hashicorp/terraform-provider-aws/pull/9032#issuecomment-639545250) for details on the difference between inline rules and rule resources. + EOT + default = false +} diff --git a/modules/msk/variables.tf b/modules/msk/variables.tf new file mode 100644 index 000000000..afdbb877a --- /dev/null +++ b/modules/msk/variables.tf @@ -0,0 +1,256 @@ +variable "region" { + type = string + description = "AWS region" + nullable = false +} + +variable "vpc_component_name" { + type = string + description = "The name of the Atmos VPC component" +} + +variable "kafka_version" { + type = string + description = <<-EOT + The desired Kafka software version. + Refer to https://docs.aws.amazon.com/msk/latest/developerguide/supported-kafka-versions.html for more details + EOT + nullable = false +} + +variable "broker_instance_type" { + type = string + description = "The instance type to use for the Kafka brokers" + nullable = false +} + +variable "broker_per_zone" { + type = number + default = 1 + description = "Number of Kafka brokers per zone" + validation { + condition = var.broker_per_zone > 0 + error_message = "The broker_per_zone value must be at least 1." + } + nullable = false +} + +variable "broker_volume_size" { + type = number + default = 1000 + description = "The size in GiB of the EBS volume for the data drive on each broker node" + nullable = false +} + +variable "client_broker" { + type = string + default = "TLS" + description = "Encryption setting for data in transit between clients and brokers. Valid values: `TLS`, `TLS_PLAINTEXT`, and `PLAINTEXT`" + nullable = false +} + +variable "encryption_in_cluster" { + type = bool + default = true + description = "Whether data communication among broker nodes is encrypted" + nullable = false +} + +variable "encryption_at_rest_kms_key_arn" { + type = string + default = "" + description = "You may specify a KMS key short ID or ARN (it will always output an ARN) to use for encrypting your data at rest" +} + +variable "enhanced_monitoring" { + type = string + default = "DEFAULT" + description = "Specify the desired enhanced MSK CloudWatch monitoring level. Valid values: `DEFAULT`, `PER_BROKER`, and `PER_TOPIC_PER_BROKER`" + nullable = false +} + +variable "certificate_authority_arns" { + type = list(string) + default = [] + description = "List of ACM Certificate Authority Amazon Resource Names (ARNs) to be used for TLS client authentication" + nullable = false +} + +variable "client_allow_unauthenticated" { + type = bool + default = false + description = "Enable unauthenticated access" + nullable = false +} + +variable "client_sasl_scram_enabled" { + type = bool + default = false + description = "Enable SCRAM client authentication via AWS Secrets Manager. 
Cannot be set to `true` at the same time as `client_tls_auth_enabled`" + nullable = false +} + +variable "client_sasl_scram_secret_association_enabled" { + type = bool + default = true + description = "Enable the list of AWS Secrets Manager secret ARNs for SCRAM authentication" + nullable = false +} + +variable "client_sasl_scram_secret_association_arns" { + type = list(string) + default = [] + description = "List of AWS Secrets Manager secret ARNs for SCRAM authentication" + nullable = false +} + +variable "client_sasl_iam_enabled" { + type = bool + default = false + description = "Enable client authentication via IAM policies. Cannot be set to `true` at the same time as `client_tls_auth_enabled`" + nullable = false +} + +variable "client_tls_auth_enabled" { + type = bool + default = false + description = "Set `true` to enable the Client TLS Authentication" + nullable = false +} + +variable "jmx_exporter_enabled" { + type = bool + default = false + description = "Set `true` to enable the JMX Exporter" + nullable = false +} + +variable "node_exporter_enabled" { + type = bool + default = false + description = "Set `true` to enable the Node Exporter" + nullable = false +} + +variable "cloudwatch_logs_enabled" { + type = bool + default = false + description = "Indicates whether you want to enable or disable streaming broker logs to Cloudwatch Logs" + nullable = false +} + +variable "cloudwatch_logs_log_group" { + type = string + default = null + description = "Name of the Cloudwatch Log Group to deliver logs to" +} + +variable "firehose_logs_enabled" { + type = bool + default = false + description = "Indicates whether you want to enable or disable streaming broker logs to Kinesis Data Firehose" + nullable = false +} + +variable "firehose_delivery_stream" { + type = string + default = "" + description = "Name of the Kinesis Data Firehose delivery stream to deliver logs to" +} + +variable "s3_logs_enabled" { + type = bool + default = false + description = " Indicates whether you want to enable or disable streaming broker logs to S3" + nullable = false +} + +variable "s3_logs_bucket" { + type = string + default = "" + description = "Name of the S3 bucket to deliver logs to" +} + +variable "s3_logs_prefix" { + type = string + default = "" + description = "Prefix to append to the S3 folder name logs are delivered to" +} + +variable "properties" { + type = map(string) + default = {} + description = "Contents of the server.properties file. Supported properties are documented in the [MSK Developer Guide](https://docs.aws.amazon.com/msk/latest/developerguide/msk-configuration-properties.html)" + nullable = false +} + +variable "autoscaling_enabled" { + type = bool + default = true + description = "To automatically expand your cluster's storage in response to increased usage, you can enable this. [More info](https://docs.aws.amazon.com/msk/latest/developerguide/msk-autoexpand.html)" + nullable = false +} + +variable "storage_autoscaling_target_value" { + type = number + default = 60 + description = "Percentage of storage used to trigger autoscaled storage increase" +} + +variable "storage_autoscaling_max_capacity" { + type = number + default = null + description = "Maximum size the autoscaling policy can scale storage. 
Defaults to `broker_volume_size`" +} + +variable "storage_autoscaling_disable_scale_in" { + type = bool + default = false + description = "If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the scalable resource" + nullable = false +} + +variable "security_group_rule_description" { + type = string + default = "Allow inbound %s traffic" + description = "The description to place on each security group rule. The %s will be replaced with the protocol name" + nullable = false +} + +variable "public_access_enabled" { + type = bool + default = false + description = "Enable public access to MSK cluster (given that all of the requirements are met)" + nullable = false +} + +variable "dns_delegated_component_name" { + type = string + description = "The component name of `dns-delegated`" + default = "dns-delegated" +} + +variable "dns_delegated_environment_name" { + type = string + description = "The environment name of `dns-delegated`" + default = "gbl" +} + +variable "broker_dns_records_count" { + type = number + description = <<-EOT + This variable specifies how many DNS records to create for the broker endpoints in the DNS zone provided in the `zone_id` variable. + This corresponds to the total number of broker endpoints created by the module. + Calculate this number by multiplying the `broker_per_zone` variable by the subnet count. + This variable is necessary to prevent the Terraform error: + The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. + EOT + default = 0 + nullable = false +} + +variable "custom_broker_dns_name" { + type = string + description = "Custom Route53 DNS hostname for MSK brokers. Use `%%ID%%` key to specify brokers index in the hostname. Example: `kafka-broker%%ID%%.example.com`" + default = null +} diff --git a/modules/msk/versions.tf b/modules/msk/versions.tf new file mode 100644 index 000000000..cc73ffd35 --- /dev/null +++ b/modules/msk/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.9.0" + } + } +} diff --git a/modules/mwaa/README.md b/modules/mwaa/README.md index a6dc52da9..c9fdad24f 100644 --- a/modules/mwaa/README.md +++ b/modules/mwaa/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/mwaa + - layer/unassigned + - provider/aws +--- + # Component: `mwaa` This component provisions Amazon managed workflows for Apache Airflow. @@ -14,9 +21,9 @@ Allows the Airflow UI to be access over the public internet to users granted acc Limits access to users within the VPC to users granted access by an IAM policy. -* MWAA creates a VPC interface endpoint for the Airflow webserver and an interface endpoint for the pgsql metadatabase. +- MWAA creates a VPC interface endpoint for the Airflow webserver and an interface endpoint for the pgsql metadatabase. 
- the endpoints are created in the AZs mapped to your private subnets -* MWAA binds an IP address from your private subnet to the interface endpoint +- MWAA binds an IP address from your private subnet to the interface endpoint ### Managing access to VPC endpoings on MWAA @@ -41,19 +48,20 @@ components: airflow_version: 2.0.2 ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | +| [aws](#provider\_aws) | >= 4.0 | ## Modules @@ -63,8 +71,8 @@ components: | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [mwaa\_environment](#module\_mwaa\_environment) | cloudposse/mwaa/aws | 0.4.8 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | -| [vpc\_ingress](#module\_vpc\_ingress) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [vpc\_ingress](#module\_vpc\_ingress) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -88,8 +96,8 @@ components: | [allowed\_web\_access\_role\_names](#input\_allowed\_web\_access\_role\_names) | List of role names to allow airflow web access | `list(string)` | `[]` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | -| [create\_iam\_role](#input\_create\_iam\_role) | Enabling or disabling the creatation of a default IAM Role for AWS MWAA | `bool` | `true` | no | -| [create\_s3\_bucket](#input\_create\_s3\_bucket) | Enabling or disabling the creatation of an S3 bucket for AWS MWAA | `bool` | `true` | no | +| [create\_iam\_role](#input\_create\_iam\_role) | Enabling or disabling the creation of a default IAM Role for AWS MWAA | `bool` | `true` | no | +| [create\_s3\_bucket](#input\_create\_s3\_bucket) | Enabling or disabling the creation of an S3 bucket for AWS MWAA | `bool` | `true` | no | | [dag\_processing\_logs\_enabled](#input\_dag\_processing\_logs\_enabled) | Enabling or disabling the collection of logs for processing DAGs | `bool` | `false` | no | | [dag\_processing\_logs\_level](#input\_dag\_processing\_logs\_level) | DAG processing logging level. Valid values: CRITICAL, ERROR, WARNING, INFO, DEBUG | `string` | `"INFO"` | no | | [dag\_s3\_path](#input\_dag\_s3\_path) | Path to dags in s3 | `string` | `"dags"` | no | @@ -100,8 +108,6 @@ components: | [environment\_class](#input\_environment\_class) | Environment class for the cluster. Possible options are mw1.small, mw1.medium, mw1.large. | `string` | `"mw1.small"` | no | | [execution\_role\_arn](#input\_execution\_role\_arn) | If `create_iam_role` is `false` then set this to the target MWAA execution role | `string` | `""` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -146,10 +152,11 @@ components: | [tags\_all](#output\_tags\_all) | A map of tags assigned to the resource, including those inherited from the provider for the Amazon MWAA Environment | | [webserver\_url](#output\_webserver\_url) | The webserver URL of the Amazon MWAA Environment | - + ## References - * [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/TODO) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/TODO) - + Cloud Posse's upstream component -[](https://cpco.io/component) \ No newline at end of file +[](https://cpco.io/component) diff --git a/modules/mwaa/providers.tf b/modules/mwaa/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/mwaa/providers.tf +++ b/modules/mwaa/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/mwaa/remote-state.tf b/modules/mwaa/remote-state.tf index a25614047..f02894659 100644 --- a/modules/mwaa/remote-state.tf +++ b/modules/mwaa/remote-state.tf @@ -1,6 +1,6 @@ module "vpc" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = "vpc" @@ -9,7 +9,7 @@ module "vpc" { module "vpc_ingress" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" for_each = toset(var.allow_ingress_from_vpc_stages) diff --git a/modules/mwaa/variables.tf b/modules/mwaa/variables.tf index 214a7c653..eceb0313a 100644 --- a/modules/mwaa/variables.tf +++ b/modules/mwaa/variables.tf @@ -5,13 +5,13 @@ variable "region" { variable "create_s3_bucket" { type = bool - description = "Enabling or disabling the creatation of an S3 bucket for AWS MWAA" + description = "Enabling or disabling the creation of an S3 bucket for AWS MWAA" default = true } variable "create_iam_role" { type = bool - description = "Enabling or disabling the creatation of a default IAM Role for AWS MWAA" + description = "Enabling or disabling the creation of a default IAM Role for AWS MWAA" default = true } diff --git a/modules/mwaa/versions.tf b/modules/mwaa/versions.tf index e89eb16ed..f33ede77f 100644 --- a/modules/mwaa/versions.tf +++ b/modules/mwaa/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } } } diff --git 
a/modules/network-firewall/README.md b/modules/network-firewall/README.md new file mode 100644 index 000000000..b95a0e63e --- /dev/null +++ b/modules/network-firewall/README.md @@ -0,0 +1,336 @@ +--- +tags: + - component/network-firewall + - layer/unassigned + - provider/aws +--- + +# Component: `network-firewall` + +This component is responsible for provisioning [AWS Network Firewall](https://aws.amazon.com/network-firewal) resources, +including Network Firewall, firewall policy, rule groups, and logging configuration. + +## Usage + +**Stack Level**: Regional + +Example of a Network Firewall with stateful 5-tuple rules: + +> [!TIP] +> +> The "5-tuple" means the five items (columns) that each rule (row, or tuple) in a firewall policy uses to define +> whether to block or allow traffic: source and destination IP, source and destination port, and protocol. +> +> Refer to +> [Standard stateful rule groups in AWS Network Firewall](https://docs.aws.amazon.com/network-firewall/latest/developerguide/stateful-rule-groups-basic.html) +> for more details. + +```yaml +components: + terraform: + network-firewall: + settings: + spacelift: + workspace_enabled: true + vars: + enabled: true + name: network-firewall + # The name of a VPC component where the Network Firewall is provisioned + vpc_component_name: vpc + firewall_subnet_name: "firewall" + stateful_default_actions: + - "aws:alert_strict" + stateless_default_actions: + - "aws:forward_to_sfe" + stateless_fragment_default_actions: + - "aws:forward_to_sfe" + stateless_custom_actions: [] + delete_protection: false + firewall_policy_change_protection: false + subnet_change_protection: false + logging_config: [] + rule_group_config: + stateful-packet-inspection: + capacity: 50 + name: stateful-packet-inspection + description: "Stateful inspection of packets" + type: "STATEFUL" + rule_group: + stateful_rule_options: + rule_order: "STRICT_ORDER" + rules_source: + stateful_rule: + - action: "DROP" + header: + destination: "124.1.1.24/32" + destination_port: 53 + direction: "ANY" + protocol: "TCP" + source: "1.2.3.4/32" + source_port: 53 + rule_option: + keyword: "sid:1" + - action: "PASS" + header: + destination: "ANY" + destination_port: "ANY" + direction: "ANY" + protocol: "TCP" + source: "10.10.192.0/19" + source_port: "ANY" + rule_option: + keyword: "sid:2" + - action: "PASS" + header: + destination: "ANY" + destination_port: "ANY" + direction: "ANY" + protocol: "TCP" + source: "10.10.224.0/19" + source_port: "ANY" + rule_option: + keyword: "sid:3" +``` + +Example of a Network Firewall with [Suricata](https://suricata.readthedocs.io/en/suricata-6.0.0/rules/) rules: + +> [!TIP] +> +> For [Suricata](https://suricata.io/) rule group type, you provide match and action settings in a string, in a Suricata +> compatible specification. The specification fully defines what the stateful rules engine looks for in a traffic flow +> and the action to take on the packets in a flow that matches the inspection criteria. +> +> Refer to +> [Suricata compatible rule strings in AWS Network Firewall](https://docs.aws.amazon.com/network-firewall/latest/developerguide/stateful-rule-groups-suricata.html) +> for more details. 
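Before the full stack example below, it may help to see the anatomy of a single Suricata-compatible rule as it would appear inside `rules_string`: an action, a protocol, a source address and port, a direction, a destination address and port, and then options such as `msg`, `sid`, and `rev`. The rule below is illustrative only:

```
drop tcp any any -> any 23 ( msg:"Drop inbound telnet"; sid:1000001; rev:1; )
```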
+ +```yaml +components: + terraform: + network-firewall: + metadata: + component: "network-firewall" + settings: + spacelift: + workspace_enabled: true + vars: + enabled: true + name: "network-firewall" + + # The name of a VPC component where the Network Firewall is provisioned + vpc_component_name: "vpc" + firewall_subnet_name: "firewall" + + delete_protection: false + firewall_policy_change_protection: false + subnet_change_protection: false + + # Logging config + logging_enabled: true + flow_logs_bucket_component_name: "network-firewall-logs-bucket-flow" + alert_logs_bucket_component_name: "network-firewall-logs-bucket-alert" + + # https://docs.aws.amazon.com/network-firewall/latest/developerguide/stateless-default-actions.html + # https://docs.aws.amazon.com/network-firewall/latest/APIReference/API_FirewallPolicy.html + # https://docs.aws.amazon.com/network-firewall/latest/developerguide/rule-action.html#rule-action-stateless + stateless_default_actions: + - "aws:forward_to_sfe" + stateless_fragment_default_actions: + - "aws:forward_to_sfe" + stateless_custom_actions: [] + + # https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-rule-evaluation-order.html#suricata-strict-rule-evaluation-order.html + # https://github.com/aws-samples/aws-network-firewall-strict-rule-ordering-terraform + policy_stateful_engine_options_rule_order: "STRICT_ORDER" + + # https://docs.aws.amazon.com/network-firewall/latest/developerguide/stateful-default-actions.html + # https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-rule-evaluation-order.html#suricata-default-rule-evaluation-order + # https://docs.aws.amazon.com/network-firewall/latest/APIReference/API_FirewallPolicy.html + stateful_default_actions: + - "aws:alert_established" + # - "aws:alert_strict" + # - "aws:drop_established" + # - "aws:drop_strict" + + # https://docs.aws.amazon.com/network-firewall/latest/developerguide/rule-groups.html + rule_group_config: + stateful-inspection: + # https://docs.aws.amazon.com/network-firewall/latest/developerguide/rule-group-managing.html#nwfw-rule-group-capacity + # For stateful rules, `capacity` means the max number of rules in the rule group + capacity: 1000 + name: "stateful-inspection" + description: "Stateful inspection of packets" + type: "STATEFUL" + + rule_group: + rule_variables: + port_sets: [] + ip_sets: + - key: "CIDR_1" + definition: + - "10.10.0.0/11" + - key: "CIDR_2" + definition: + - "10.11.0.0/11" + - key: "SCANNER" + definition: + - "10.12.48.186/32" + # bad actors + - key: "BLOCKED_LIST" + definition: + - "193.142.146.35/32" + - "69.40.195.236/32" + - "125.17.153.207/32" + - "185.220.101.4/32" + - "195.219.212.151/32" + - "162.247.72.199/32" + - "147.185.254.17/32" + - "179.60.147.101/32" + - "157.230.244.66/32" + - "192.99.4.116/32" + - "62.102.148.69/32" + - "185.129.62.62/32" + + stateful_rule_options: + # https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-rule-evaluation-order.html#suricata-strict-rule-evaluation-order.html + # All the stateful rule groups are provided to the rule engine as Suricata compatible strings + # Suricata can evaluate stateful rule groups by using the default rule group ordering method, + # or you can set an exact order using the strict ordering method. + # The settings for your rule groups must match the settings for the firewall policy that they belong to. 
+ # With strict ordering, the rule groups are evaluated by order of priority, starting from the lowest number, + # and the rules in each rule group are processed in the order in which they're defined. + rule_order: "STRICT_ORDER" + + # https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-how-to-provide-rules.html + rules_source: + # Suricata rules for the rule group + # https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-examples.html + # https://docs.aws.amazon.com/network-firewall/latest/developerguide/suricata-rule-evaluation-order.html + # https://github.com/aws-samples/aws-network-firewall-terraform/blob/main/firewall.tf#L66 + # https://docs.aws.amazon.com/network-firewall/latest/developerguide/stateful-rule-groups-suricata.html + # https://coralogix.com/blog/writing-effective-suricata-rules-for-the-sta/ + # https://suricata.readthedocs.io/en/suricata-6.0.10/rules/intro.html + # https://suricata.readthedocs.io/en/suricata-6.0.0/rules/header-keywords.html + # https://docs.aws.amazon.com/network-firewall/latest/developerguide/rule-action.html + # + # With Strict evaluation order, the rules in each rule group are processed in the order in which they're defined + # + # Pass – Discontinue inspection of the matching packet and permit it to go to its intended destination + # + # Drop or Alert – Evaluate the packet against all rules with drop or alert action settings. + # If the firewall has alert logging configured, send a message to the firewall's alert logs for each matching rule. + # The first log entry for the packet will be for the first rule that matched the packet. + # After all rules have been evaluated, handle the packet according to the action setting in the first rule that matched the packet. + # If the first rule has a drop action, block the packet. If it has an alert action, continue evaluation. + # + # Reject – Drop traffic that matches the conditions of the stateful rule and send a TCP reset packet back to sender of the packet. + # A TCP reset packet is a packet with no payload and a RST bit contained in the TCP header flags. + # Reject is available only for TCP traffic. This option doesn't support FTP and IMAP protocols. + rules_string: | + alert ip $BLOCKED_LIST any <> any any ( msg:"Alert on blocked traffic"; sid:100; rev:1; ) + drop ip $BLOCKED_LIST any <> any any ( msg:"Blocked blocked traffic"; sid:200; rev:1; ) + + pass ip $SCANNER any -> any any ( msg: "Allow scanner"; sid:300; rev:1; ) + + alert ip $CIDR_1 any -> $CIDR_2 any ( msg:"Alert on CIDR_1 to CIDR_2 traffic"; sid:400; rev:1; ) + drop ip $CIDR_1 any -> $CIDR_2 any ( msg:"Blocked CIDR_1 to CIDR_2 traffic"; sid:410; rev:1; ) + + pass ip any any <> any any ( msg: "Allow general traffic"; sid:10000; rev:1; ) +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | + +## Providers + +No providers. 
+ +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [alert\_logs\_bucket](#module\_alert\_logs\_bucket) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [flow\_logs\_bucket](#module\_flow\_logs\_bucket) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [network\_firewall](#module\_network\_firewall) | cloudposse/network-firewall/aws | 0.3.2 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [alert\_logs\_bucket\_component\_name](#input\_alert\_logs\_bucket\_component\_name) | Alert logs bucket component name | `string` | `null` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delete\_protection](#input\_delete\_protection) | A boolean flag indicating whether it is possible to delete the firewall | `bool` | `false` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [firewall\_policy\_change\_protection](#input\_firewall\_policy\_change\_protection) | A boolean flag indicating whether it is possible to change the associated firewall policy | `bool` | `false` | no | +| [firewall\_subnet\_name](#input\_firewall\_subnet\_name) | Firewall subnet name | `string` | `"firewall"` | no | +| [flow\_logs\_bucket\_component\_name](#input\_flow\_logs\_bucket\_component\_name) | Flow logs bucket component name | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [logging\_enabled](#input\_logging\_enabled) | Flag to enable/disable Network Firewall Flow and Alert Logs | `bool` | `false` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [network\_firewall\_description](#input\_network\_firewall\_description) | AWS Network Firewall description. If not provided, the Network Firewall name will be used | `string` | `null` | no | +| [network\_firewall\_name](#input\_network\_firewall\_name) | Friendly name to give the Network Firewall. If not provided, the name will be derived from the context.
Changing the name will cause the Firewall to be deleted and recreated. | `string` | `null` | no | +| [network\_firewall\_policy\_name](#input\_network\_firewall\_policy\_name) | Friendly name to give the Network Firewall policy. If not provided, the name will be derived from the context.
Changing the name will cause the policy to be deleted and recreated. | `string` | `null` | no | +| [policy\_stateful\_engine\_options\_rule\_order](#input\_policy\_stateful\_engine\_options\_rule\_order) | Indicates how to manage the order of stateful rule evaluation for the policy. Valid values: DEFAULT\_ACTION\_ORDER, STRICT\_ORDER | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [rule\_group\_config](#input\_rule\_group\_config) | Rule group configuration. Refer to [networkfirewall\_rule\_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/networkfirewall_rule_group) for configuration details | `any` | n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [stateful\_default\_actions](#input\_stateful\_default\_actions) | Default stateful actions | `list(string)` |
[
"aws:alert_strict"
]
| no | +| [stateless\_custom\_actions](#input\_stateless\_custom\_actions) | Set of configuration blocks describing the custom action definitions that are available for use in the firewall policy's `stateless_default_actions` |
list(object({
action_name = string
dimensions = list(string)
}))
| `[]` | no | +| [stateless\_default\_actions](#input\_stateless\_default\_actions) | Default stateless actions | `list(string)` |
[
"aws:forward_to_sfe"
]
| no | +| [stateless\_fragment\_default\_actions](#input\_stateless\_fragment\_default\_actions) | Default stateless actions for fragmented packets | `list(string)` |
[
"aws:forward_to_sfe"
]
| no | +| [subnet\_change\_protection](#input\_subnet\_change\_protection) | A boolean flag indicating whether it is possible to change the associated subnet(s) | `bool` | `false` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [vpc\_component\_name](#input\_vpc\_component\_name) | The name of a VPC component where the Network Firewall is provisioned | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [az\_subnet\_endpoint\_stats](#output\_az\_subnet\_endpoint\_stats) | List of objects with each object having three items: AZ, subnet ID, VPC endpoint ID | +| [network\_firewall\_arn](#output\_network\_firewall\_arn) | Network Firewall ARN | +| [network\_firewall\_name](#output\_network\_firewall\_name) | Network Firewall name | +| [network\_firewall\_policy\_arn](#output\_network\_firewall\_policy\_arn) | Network Firewall policy ARN | +| [network\_firewall\_policy\_name](#output\_network\_firewall\_policy\_name) | Network Firewall policy name | +| [network\_firewall\_status](#output\_network\_firewall\_status) | Nested list of information about the current status of the Network Firewall | + + + +## References + +- [Deploy centralized traffic filtering using AWS Network Firewall](https://aws.amazon.com/blogs/networking-and-content-delivery/deploy-centralized-traffic-filtering-using-aws-network-firewall) +- [AWS Network Firewall – New Managed Firewall Service in VPC](https://aws.amazon.com/blogs/aws/aws-network-firewall-new-managed-firewall-service-in-vpc) +- [Deployment models for AWS Network Firewall](https://aws.amazon.com/blogs/networking-and-content-delivery/deployment-models-for-aws-network-firewall) +- [Deployment models for AWS Network Firewall with VPC routing enhancements](https://aws.amazon.com/blogs/networking-and-content-delivery/deployment-models-for-aws-network-firewall-with-vpc-routing-enhancements) +- [Inspection Deployment Models with AWS Network Firewall](https://d1.awsstatic.com/architecture-diagrams/ArchitectureDiagrams/inspection-deployment-models-with-AWS-network-firewall-ra.pdf) +- [How to deploy AWS Network Firewall by using AWS Firewall Manager](https://aws.amazon.com/blogs/security/how-to-deploy-aws-network-firewall-by-using-aws-firewall-manager) +- [A Deep Dive into AWS Transit Gateway](https://www.youtube.com/watch?v=a55Iud-66q0) +- [Appliance in a shared services VPC](https://docs.aws.amazon.com/vpc/latest/tgw/transit-gateway-appliance-scenario.html) +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/TODO) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/network-firewall/context.tf b/modules/network-firewall/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/network-firewall/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. 
+# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. 
A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? 
true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/network-firewall/main.tf b/modules/network-firewall/main.tf new file mode 100644 index 000000000..5379a76d4 --- /dev/null +++ b/modules/network-firewall/main.tf @@ -0,0 +1,55 @@ +locals { + enabled = module.this.enabled + logging_enabled = local.enabled && var.logging_enabled + + # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/networkfirewall_logging_configuration + logging_config = local.logging_enabled ? 
{ + flow = { + log_destination_type = "S3" + log_type = "FLOW" + log_destination = { + bucketName = try(module.flow_logs_bucket.outputs.bucket_id, "") + prefix = null + } + }, + alert = { + log_destination_type = "S3" + log_type = "ALERT" + log_destination = { + bucketName = try(module.alert_logs_bucket.outputs.bucket_id, "") + prefix = null + } + } + } : {} + + vpc_outputs = module.vpc.outputs + firewall_subnet_ids = local.vpc_outputs.named_private_subnets_map[var.firewall_subnet_name] +} + +module "network_firewall" { + source = "cloudposse/network-firewall/aws" + version = "0.3.2" + + vpc_id = local.vpc_outputs.vpc_id + subnet_ids = local.firewall_subnet_ids + + network_firewall_name = var.network_firewall_name + network_firewall_description = var.network_firewall_description + network_firewall_policy_name = var.network_firewall_policy_name + policy_stateful_engine_options_rule_order = var.policy_stateful_engine_options_rule_order + stateful_default_actions = var.stateful_default_actions + stateless_default_actions = var.stateless_default_actions + stateless_fragment_default_actions = var.stateless_fragment_default_actions + stateless_custom_actions = var.stateless_custom_actions + delete_protection = var.delete_protection + firewall_policy_change_protection = var.firewall_policy_change_protection + subnet_change_protection = var.subnet_change_protection + + # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/networkfirewall_logging_configuration + logging_config = local.logging_config + + # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/networkfirewall_rule_group + rule_group_config = var.rule_group_config + + context = module.this.context +} diff --git a/modules/network-firewall/outputs.tf b/modules/network-firewall/outputs.tf new file mode 100644 index 000000000..e9d0c1e94 --- /dev/null +++ b/modules/network-firewall/outputs.tf @@ -0,0 +1,29 @@ +output "network_firewall_name" { + description = "Network Firewall name" + value = module.network_firewall.network_firewall_name +} + +output "network_firewall_arn" { + description = "Network Firewall ARN" + value = module.network_firewall.network_firewall_arn +} + +output "network_firewall_status" { + description = "Nested list of information about the current status of the Network Firewall" + value = module.network_firewall.network_firewall_status +} + +output "network_firewall_policy_name" { + description = "Network Firewall policy name" + value = module.network_firewall.network_firewall_policy_name +} + +output "network_firewall_policy_arn" { + description = "Network Firewall policy ARN" + value = module.network_firewall.network_firewall_policy_arn +} + +output "az_subnet_endpoint_stats" { + description = "List of objects with each object having three items: AZ, subnet ID, VPC endpoint ID" + value = module.network_firewall.az_subnet_endpoint_stats +} diff --git a/modules/network-firewall/providers.tf b/modules/network-firewall/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/network-firewall/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/network-firewall/remote-state.tf b/modules/network-firewall/remote-state.tf new file mode 100644 index 000000000..da533f575 --- /dev/null +++ b/modules/network-firewall/remote-state.tf @@ -0,0 +1,40 @@ +module "vpc" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.vpc_component_name + + context = module.this.context +} + +module "flow_logs_bucket" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.flow_logs_bucket_component_name + + bypass = !local.logging_enabled || var.flow_logs_bucket_component_name == null || var.flow_logs_bucket_component_name == "" + + defaults = { + bucket_id = "" + bucket_arn = "" + } + + context = module.this.context +} + +module "alert_logs_bucket" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.alert_logs_bucket_component_name + + bypass = !local.logging_enabled || var.alert_logs_bucket_component_name == null || var.alert_logs_bucket_component_name == "" + + defaults = { + bucket_id = "" + bucket_arn = "" + } + + context = module.this.context +} diff --git a/modules/network-firewall/variables.tf b/modules/network-firewall/variables.tf new file mode 100644 index 000000000..36d54b7ca --- /dev/null +++ b/modules/network-firewall/variables.tf @@ -0,0 +1,113 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "vpc_component_name" { + type = string + description = "The name of a VPC component where the Network Firewall is provisioned" +} + +variable "network_firewall_name" { + type = string + description = <<-EOT + Friendly name to give the Network Firewall. If not provided, the name will be derived from the context. + Changing the name will cause the Firewall to be deleted and recreated. + EOT + default = null +} + +variable "network_firewall_description" { + type = string + description = "AWS Network Firewall description. If not provided, the Network Firewall name will be used" + default = null +} + +variable "network_firewall_policy_name" { + type = string + description = <<-EOT + Friendly name to give the Network Firewall policy. If not provided, the name will be derived from the context. + Changing the name will cause the policy to be deleted and recreated. + EOT + default = null +} + +variable "policy_stateful_engine_options_rule_order" { + type = string + description = "Indicates how to manage the order of stateful rule evaluation for the policy. 
Valid values: DEFAULT_ACTION_ORDER, STRICT_ORDER" + default = null +} + +variable "stateful_default_actions" { + type = list(string) + description = "Default stateful actions" + default = ["aws:alert_strict"] +} + +variable "stateless_default_actions" { + type = list(string) + description = "Default stateless actions" + default = ["aws:forward_to_sfe"] +} + +variable "stateless_fragment_default_actions" { + type = list(string) + description = "Default stateless actions for fragmented packets" + default = ["aws:forward_to_sfe"] +} + +variable "stateless_custom_actions" { + type = list(object({ + action_name = string + dimensions = list(string) + })) + description = "Set of configuration blocks describing the custom action definitions that are available for use in the firewall policy's `stateless_default_actions`" + default = [] +} + +variable "delete_protection" { + type = bool + description = "A boolean flag indicating whether it is possible to delete the firewall" + default = false +} + +variable "firewall_policy_change_protection" { + type = bool + description = "A boolean flag indicating whether it is possible to change the associated firewall policy" + default = false +} + +variable "subnet_change_protection" { + type = bool + description = "A boolean flag indicating whether it is possible to change the associated subnet(s)" + default = false +} + +variable "rule_group_config" { + type = any + description = "Rule group configuration. Refer to [networkfirewall_rule_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/networkfirewall_rule_group) for configuration details" +} + +variable "logging_enabled" { + type = bool + description = "Flag to enable/disable Network Firewall Flow and Alert Logs" + default = false +} + +variable "flow_logs_bucket_component_name" { + type = string + description = "Flow logs bucket component name" + default = null +} + +variable "alert_logs_bucket_component_name" { + type = string + description = "Alert logs bucket component name" + default = null +} + +variable "firewall_subnet_name" { + type = string + description = "Firewall subnet name" + default = "firewall" +} diff --git a/modules/network-firewall/versions.tf b/modules/network-firewall/versions.tf new file mode 100644 index 000000000..cc73ffd35 --- /dev/null +++ b/modules/network-firewall/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.9.0" + } + } +} diff --git a/modules/opsgenie-team/CHANGELOG.md b/modules/opsgenie-team/CHANGELOG.md new file mode 100644 index 000000000..209f811aa --- /dev/null +++ b/modules/opsgenie-team/CHANGELOG.md @@ -0,0 +1,13 @@ +## Changes in PR #889, expected Component version ~1.334.0 + +### `team` replaced with `team_options` + +The `team` variable has been replaced with `team_options` to reduce confusion. The component only ever creates at most +one team, with the name specified in the `name` variable. The `team` variable was introduced to provide a single object +to specify other options, but was not implemented properly. + +### Team membership now managed by this component by default + +Previously, the default behavior was to not manage team membership, allowing users to be managed via the Opsgenie UI. +Now the default is to manage via the `members` input. To restore the previous behavior, set +`team_options.ignore_members` to `true`. 
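As a hedged sketch (the description text is hypothetical; the keys mirror the new `team_options` object type), the replacement input can be set like this:

```hcl
# Hypothetical component values after this change. The old `team` map is gone;
# per-team options now live in `team_options`.
team_options = {
  description              = "SRE on-call team" # hypothetical
  ignore_members           = true               # restore the previous behavior: manage membership in the Opsgenie UI
  delete_default_resources = false
}
```

Leaving `ignore_members` at its default of `false` means team membership is managed by the `members` input.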
diff --git a/modules/opsgenie-team/README.md b/modules/opsgenie-team/README.md index 43d82e7d4..74bd1dc15 100644 --- a/modules/opsgenie-team/README.md +++ b/modules/opsgenie-team/README.md @@ -1,18 +1,69 @@ +--- +tags: + - component/opsgenie-team + - layer/unassigned + - provider/aws +--- + # Component: `opsgenie-team` This component is responsible for provisioning Opsgenie teams and related services, rules, schedules. ## Usage +#### Pre-requisites + +You need an API Key stored in `/opsgenie/opsgenie_api_key` of SSM, this is configurable using the +`ssm_parameter_name_format` and `ssm_path` variables. + +Opsgenie is now part of Atlassian, so you need to make sure you are creating an Opsgenie API Key, which looks like +`abcdef12-3456-7890-abcd-ef0123456789` and not an Atlassian API key, which looks like + +```shell +ATAfT3xFfGF0VFXAfl8EmQNPVv1Hlazp3wsJgTmM8Ph7iP-RtQyiEfw-fkDS2LvymlyUOOhc5XiSx46vQWnznCJolq-GMX4KzdvOSPhEWr-BF6LEkJQC4CSjDJv0N7d91-0gVekNmCD2kXY9haUHUSpO4H7X6QxyImUb9VmOKIWTbQi8rf4CF28=63CB21B9 +``` + +Generate an API Key by going to Settings -> API key management on your Opsgenie control panel, which will have an +address like `https://.app.opsgenie.com/settings/api-key-management`, and click the "Add new API key" button. +For more information, see the +[Opsgenie API key management documentation](https://support.atlassian.com/opsgenie/docs/api-key-management/). + +Once you have the key, you'll need to test it with a curl to verify that you are at least on a Standard plan with +OpsGenie: + +``` +curl -X GET 'https://api.opsgenie.com/v2/account' \ + --header "Authorization: GenieKey $API_KEY" +``` + +The result should be something similar to below: + +``` +{ + "data": { + "name": "opsgenie", + "plan": { + "maxUserCount": 1500, + "name": "Enterprise", + ... +} +``` + +If you see `Free` or `Essentials` in the plan, then you won't be able to use this component. You can see more details +here: [OpsGenie pricing/features](https://www.atlassian.com/software/opsgenie/pricing#) + +#### Getting Started + **Stack Level**: Global Here's an example snippet for how to use this component. -This component should only be applied once as the resources it creates are regional, but it works with integrations. This is typically done via the auto or corp stack (e.g. `gbl-auto.yaml`). +This component should only be applied once as the resources it creates are regional, but it works with integrations. +This is typically done via the auto or corp stack (e.g. `gbl-auto.yaml`). ```yaml # 9-5 Mon-Fri -business_hours: &buisness_hours +business_hours: &business_hours type: "weekday-and-time-of-day" restrictions: - start_hour: 9 @@ -31,6 +82,7 @@ waking_hours: &waking_hours end_hour: 17 end_min: 00 +# This is a partial incident mapping, we use this as a base to add P1 & P2 below. This is not a complete mapping as there is no P0 priority_level_to_incident: &priority_level_to_incident enabled: true type: incident @@ -72,7 +124,7 @@ components: opsgenie-team-defaults: metadata: type: abstract - component: opsgenie + component: opsgenie-team vars: schedules: @@ -81,6 +133,8 @@ components: description: "London Schedule" timezone: "Europe/London" + # Routing Rules determine how alerts are routed to the team, + # this includes priority changes, incident mappings, and schedules. routing_rules: london_schedule: enabled: false @@ -99,6 +153,8 @@ components: expected_value: P2 # Since Incidents require a service, we create a rule for every `routing_rule` type `incident` for every service on the team. 
+ # This is done behind the scenes by the `opsgenie-team` component. + # These rules below map P1 & P2 to incidents, using yaml anchors from above. p1: *p1_is_incident p2: *p2_is_incident @@ -124,38 +180,37 @@ components: enabled: true name: otherteam_escalation description: Other team escalation - rule: + rules: condition: if-not-acked notify_type: default delay: 60 recipients: - - type: team - name: otherteam + - type: team + name: otherteam yaep_escalation: enabled: true name: yaep_escalation description: Yet another escalation policy - rule: + rules: condition: if-not-acked notify_type: default delay: 90 recipients: - - type: user - name: user@example.com + - type: user + name: user@example.com schedule_escalation: enabled: true name: schedule_escalation description: Schedule escalation policy - rule: + rules: condition: if-not-acked notify_type: default delay: 30 recipients: - - type: schedule - name: secondary_on_call - + - type: schedule + name: secondary_on_call ``` The API keys relating to the Opsgenie Integrations are stored in SSM Parameter Store and can be accessed via chamber. @@ -165,33 +220,37 @@ AWS_PROFILE=foo chamber list opsgenie-team/ ``` ### ClickOps Work - - The initial Setup requires ClickOps to setup the datadog integration on the datadog side. This is a limitation because there isn’t a resource for datadog to create an opsgenie integration so this has to be done manually via ClickOps. (See Limitations Below) - - After deploying the opsgenie-team component the created team will have a schedule named after the team. This is purposely left to be clickOps’d so the UI can be used to set who is on call, as that is the usual way (not through code). Additionally We do not want a re-apply of the terraform to delete or shuffle who is planned to be on call, thus we left who is on-call on a schedule out of the component. + +- After deploying the opsgenie-team component the created team will have a schedule named after the team. This is + purposely left to be clickOps’d so the UI can be used to set who is on call, as that is the usual way (not through + code). Additionally, we do not want a re-apply of the Terraform to delete or shuffle who is planned to be on call, + thus we left who is on-call on a schedule out of the component. ## Known Issues ### Different API Endpoints in Use The problem is there are 3 different api endpoints in use + - `/webapp` - the most robust - only exposed to the UI (that we've seen) - `/v2/` - robust with some differences from `webapp` - `/v1/` - the oldest and furthest from the live UI. +### Cannot create users + +This module does not create users. Users must have already been created to be added to a team. 
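As a point of reference, the component only resolves members against existing Opsgenie users; a minimal sketch of that lookup, with hypothetical usernames, looks like this:

```hcl
# Team members are looked up, never created. A username that does not already
# exist in Opsgenie will cause this data source lookup to fail.
data "opsgenie_user" "team_members" {
  for_each = toset(["alice@example.com", "bob@example.com"]) # hypothetical users

  username = each.value
}
```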
+ ### Cannot Add dependent Services - Api Currently doesn't support Multiple ServiceIds for incident Rules ### Cannot Add Stakeholders - - Track the issue: https://github.com/opsgenie/terraform-provider-opsgenie/issues/278 - -### There isn’t a resource for datadog to create an opsgenie integration so this has to be done manually via ClickOps - - - Track the issue: https://github.com/DataDog/terraform-provider-datadog/issues/836 +- Track the issue: https://github.com/opsgenie/terraform-provider-opsgenie/issues/278 ### No Resource to create Slack Integration - - Track the issue: https://github.com/DataDog/terraform-provider-datadog/issues/67 +- Track the issue: https://github.com/DataDog/terraform-provider-datadog/issues/67 ### Out of Date Terraform Docs @@ -200,10 +259,12 @@ Another Problem is the terraform docs are not always up to date with the provide The OpsGenie Provider uses a mix of `/v1` and `/v2`. This means there are many things you can only do from the UI. Listed below in no particular order -- Incident Routing cannot add dependent services - in `v1` and `v2` a `service_incident_rule` object has `serviceId` as type string, in webapp this becomes `serviceIds` of type `list(string)` + +- Incident Routing cannot add dependent services - in `v1` and `v2` a `service_incident_rule` object has `serviceId` as + type string, in webapp this becomes `serviceIds` of type `list(string)` - Opsgenie Provider appears to be inconsistent with how it uses `time_restriction`: - - `restrictions` for type `weekday-and-time-of-day` - - `restriction` for type `time-of-day` + - `restrictions` for type `weekday-and-time-of-day` + - `restriction` for type `time-of-day` Unfortunately none of this is in the terraform docs, and was found via errors and digging through source code. @@ -215,49 +276,54 @@ We recommend to use the human readable timezone such as `Europe/London`. - Setting a schedule to a GMT-style timezone with offsets can cause inconsistent plans. - Setting the timezone to `Etc/GMT+1` instead of `Europe/London`, will lead to permadrift as OpsGenie converts the GMT offsets to regional timezones at deploy-time. In the previous deploy, the GMT style get converted to `Atlantic/Cape_Verde`. + Setting the timezone to `Etc/GMT+1` instead of `Europe/London`, will lead to permadrift as OpsGenie converts the GMT + offsets to regional timezones at deploy-time. In the previous deploy, the GMT style get converted to + `Atlantic/Cape_Verde`. - ```hcl - # module.routing["london_schedule"].module.team_routing_rule[0].opsgenie_team_routing_rule.this[0] will be updated in-place - ~ resource "opsgenie_team_routing_rule" "this" { - id = "4b4c4454-8ccf-41a9-b856-02bec6419ba7" - name = "london_schedule" - ~ timezone = "Atlantic/Cape_Verde" -> "Etc/GMT+1" - # (2 unchanged attributes hidden) - ``` + ```hcl + # module.routing["london_schedule"].module.team_routing_rule[0].opsgenie_team_routing_rule.this[0] will be updated in-place + ~ resource "opsgenie_team_routing_rule" "this" { + id = "4b4c4454-8ccf-41a9-b856-02bec6419ba7" + name = "london_schedule" + ~ timezone = "Atlantic/Cape_Verde" -> "Etc/GMT+1" + # (2 unchanged attributes hidden) + ``` - Some GMT styles will not cause a timezone change on subsequent applies such as `Etc/GMT+8` for `Asia/Taipei`. + Some GMT styles will not cause a timezone change on subsequent applies such as `Etc/GMT+8` for `Asia/Taipei`. -- If the calendar date has crossed daylight savings time, the `Etc/GMT+` GMT style will need to be updated to reflect the correct timezone. 
+- If the calendar date has crossed daylight savings time, the `Etc/GMT+` GMT style will need to be updated to reflect + the correct timezone. Track the issue: https://github.com/opsgenie/terraform-provider-opsgenie/issues/258 - + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | +| [datadog](#requirement\_datadog) | >= 3.3.0 | | [opsgenie](#requirement\_opsgenie) | >= 0.6.7 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | +| [aws](#provider\_aws) | >= 4.9.0 | +| [datadog](#provider\_datadog) | >= 3.3.0 | | [opsgenie](#provider\_opsgenie) | >= 0.6.7 | ## Modules | Name | Source | Version | |------|--------|---------| +| [datadog\_configuration](#module\_datadog\_configuration) | ../datadog-configuration/modules/datadog_keys | n/a | | [escalation](#module\_escalation) | ./modules/escalation | n/a | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [integration](#module\_integration) | ./modules/integration | n/a | -| [introspection](#module\_introspection) | cloudposse/label/null | 0.25.0 | -| [members\_merge](#module\_members\_merge) | cloudposse/config/yaml//modules/deepmerge | 1.0.1 | +| [members\_merge](#module\_members\_merge) | cloudposse/config/yaml//modules/deepmerge | 1.0.2 | | [routing](#module\_routing) | ./modules/routing | n/a | | [schedule](#module\_schedule) | cloudposse/incident-management/opsgenie//modules/schedule | 0.16.0 | | [service](#module\_service) | cloudposse/incident-management/opsgenie//modules/service | 0.16.0 | @@ -268,7 +334,9 @@ Track the issue: https://github.com/opsgenie/terraform-provider-opsgenie/issues/ | Name | Type | |------|------| +| [datadog_integration_opsgenie_service_object.fake_service_name](https://registry.terraform.io/providers/datadog/datadog/latest/docs/resources/integration_opsgenie_service_object) | resource | | [aws_ssm_parameter.opsgenie_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.opsgenie_team_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | | [opsgenie_team.existing](https://registry.terraform.io/providers/opsgenie/opsgenie/latest/docs/data-sources/team) | data source | | [opsgenie_user.team_members](https://registry.terraform.io/providers/opsgenie/opsgenie/latest/docs/data-sources/user) | data source | @@ -280,14 +348,13 @@ Track the issue: https://github.com/opsgenie/terraform-provider-opsgenie/issues/ | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [create\_only\_integrations\_enabled](#input\_create\_only\_integrations\_enabled) | Whether to reuse all existing resources and only create new integrations | `bool` | `false` | no | +| [datadog\_integration\_enabled](#input\_datadog\_integration\_enabled) | Whether to enable Datadog integration with opsgenie (datadog side) | `bool` | `true` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [escalations](#input\_escalations) | Escalations to configure and create for the team. | `map(any)` | `{}` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [integrations](#input\_integrations) | API Integrations for the team. If not specified, `datadog` is assumed. | `map(any)` | `{}` | no | | [integrations\_enabled](#input\_integrations\_enabled) | Whether to enable the integrations submodule or not | `bool` | `true` | no | | [kms\_key\_arn](#input\_kms\_key\_arn) | AWS KMS key used for writing to SSM | `string` | `"alias/aws/ssm"` | no | @@ -300,7 +367,6 @@ Track the issue: https://github.com/opsgenie/terraform-provider-opsgenie/issues/ | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | -| [required\_tags](#input\_required\_tags) | List of required tag names | `list(string)` | `[]` | no | | [routing\_rules](#input\_routing\_rules) | Routing Rules for the team | `any` | `null` | no | | [schedules](#input\_schedules) | Schedules to create for the team | `map(any)` | `{}` | no | | [services](#input\_services) | Services to create and register to the team. | `map(any)` | `{}` | no | @@ -308,7 +374,9 @@ Track the issue: https://github.com/opsgenie/terraform-provider-opsgenie/issues/ | [ssm\_path](#input\_ssm\_path) | SSM path | `string` | `"opsgenie"` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | -| [team](#input\_team) | Configure the team inputs | `map(any)` | `{}` | no | +| [team\_name](#input\_team\_name) | Current OpsGenie Team Name | `string` | `null` | no | +| [team\_naming\_format](#input\_team\_naming\_format) | OpsGenie Team Naming Format | `string` | `"%s_%s"` | no | +| [team\_options](#input\_team\_options) | Configure the team options.
See `opsgenie_team` Terraform resource [documentation](https://registry.terraform.io/providers/opsgenie/opsgenie/latest/docs/resources/team#argument-reference) for more details. |
object({
description = optional(string)
ignore_members = optional(bool, false)
delete_default_resources = optional(bool, false)
})
| `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | ## Outputs @@ -322,9 +390,15 @@ Track the issue: https://github.com/opsgenie/terraform-provider-opsgenie/issues/ | [team\_members](#output\_team\_members) | Team members | | [team\_name](#output\_team\_name) | Team Name | + + +## Related How-to Guides + +[See OpsGenie in the Reference Architecture](https://docs.cloudposse.com/layers/alerting/opsgenie/) ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/opsgenie-team) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/opsgenie-team) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/opsgenie-team/datadog-integration.tf b/modules/opsgenie-team/datadog-integration.tf new file mode 100644 index 000000000..132712d01 --- /dev/null +++ b/modules/opsgenie-team/datadog-integration.tf @@ -0,0 +1,20 @@ +variable "datadog_integration_enabled" { + type = bool + default = true + description = "Whether to enable Datadog integration with opsgenie (datadog side)" +} + +data "aws_ssm_parameter" "opsgenie_team_api_key" { + count = local.enabled && var.datadog_integration_enabled ? 1 : 0 + name = module.integration["datadog"].ssm_path + with_decryption = true + depends_on = [module.integration] +} + +resource "datadog_integration_opsgenie_service_object" "fake_service_name" { + count = local.enabled && var.datadog_integration_enabled ? 1 : 0 + name = local.team_name + opsgenie_api_key = data.aws_ssm_parameter.opsgenie_team_api_key[0].value + region = "us" + depends_on = [module.integration, module.datadog_configuration] +} diff --git a/modules/opsgenie-team/default.auto.tfvars b/modules/opsgenie-team/default.auto.tfvars deleted file mode 100644 index afca9e898..000000000 --- a/modules/opsgenie-team/default.auto.tfvars +++ /dev/null @@ -1,5 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false - -name = "opsgenie-team" diff --git a/modules/opsgenie-team/introspection.mixin.tf b/modules/opsgenie-team/introspection.mixin.tf deleted file mode 100644 index 593327249..000000000 --- a/modules/opsgenie-team/introspection.mixin.tf +++ /dev/null @@ -1,26 +0,0 @@ -locals { - # Throw an error if lookup fails - # tflint-ignore: terraform_unused_declarations - check_required_tags = module.this.enabled ? [ - for k in var.required_tags : - lookup(module.this.tags, k) - ] : [] -} - -variable "required_tags" { - type = list(string) - description = "List of required tag names" - default = [] -} - -# introspection module will contain the additional tags -module "introspection" { - source = "cloudposse/label/null" - version = "0.25.0" - - tags = merge(var.tags, { - "Component" = basename(abspath(path.module)) - }) - - context = module.this.context -} diff --git a/modules/opsgenie-team/main.tf b/modules/opsgenie-team/main.tf index 6021eceb6..1944e3a4e 100644 --- a/modules/opsgenie-team/main.tf +++ b/modules/opsgenie-team/main.tf @@ -23,7 +23,7 @@ data "opsgenie_team" "existing" { } data "opsgenie_user" "team_members" { - for_each = local.enabled ? { + for_each = local.enabled && !var.team_options.ignore_members ? 
{ for member in var.members : member.user => member } : {} @@ -33,18 +33,18 @@ data "opsgenie_user" "team_members" { module "members_merge" { source = "cloudposse/config/yaml//modules/deepmerge" - version = "1.0.1" + version = "1.0.2" # Cannot use context to disable # See issue: https://github.com/cloudposse/terraform-yaml-config/issues/18 - count = local.enabled && lookup(var.team, "ignore_members", false) ? 1 : 0 + count = local.enabled && !var.team_options.ignore_members ? 1 : 0 maps = [ data.opsgenie_user.team_members, local.members, ] - # context = module.introspection.context + # context = module.this.context } module "team" { @@ -57,9 +57,9 @@ module "team" { team = merge({ name = module.this.name members = try(module.members_merge[0].merged, []) - }, var.team) + }, var.team_options, try(length(var.team_options.description), 0) == 0 ? { description = module.this.name } : {}) - context = module.introspection.context + context = module.this.context } module "integration" { @@ -67,7 +67,7 @@ module "integration" { # We add Datadog here because we need the core input for the team. # Can be overridden by var.integrations.datadog - for_each = var.integrations_enabled ? merge({ + for_each = local.enabled && var.integrations_enabled ? merge({ datadog : { type : "Datadog" } @@ -93,7 +93,7 @@ module "integration" { # Allow underscores in the identifier regex_replace_chars = "/[^a-zA-Z0-9-_]/" - context = module.introspection.context + context = module.this.context depends_on = [module.team] } @@ -102,7 +102,7 @@ module "service" { source = "cloudposse/incident-management/opsgenie//modules/service" version = "0.16.0" - for_each = var.services + for_each = local.enabled ? var.services : {} # Only create if not reusing an existing team enabled = local.create_all_enabled @@ -113,7 +113,7 @@ module "service" { description = lookup(each.value, "description", null) } - context = module.introspection.context + context = module.this.context depends_on = [module.team] } @@ -122,23 +122,23 @@ module "schedule" { source = "cloudposse/incident-management/opsgenie//modules/schedule" version = "0.16.0" - for_each = { + for_each = local.enabled ? { for k, v in var.schedules : k => v if try(v.enabled == true, false) - } + } : {} # Only create if not reusing an existing team enabled = local.create_all_enabled schedule = { - name = try(each.key, null) + name = try(format(var.team_naming_format, local.team_name, each.key), null) description = try(each.value.description, null) timezone = try(each.value.timezone, null) owner_team_id = local.team_id } - context = module.introspection.context + context = module.this.context depends_on = [ module.team, @@ -148,11 +148,11 @@ module "schedule" { module "routing" { source = "./modules/routing" - for_each = { + for_each = local.enabled ? 
{ for k, v in var.routing_rules : k => v if try(v.enabled == true, false) - } + } : {} # Only create if not reusing an existing team enabled = local.create_all_enabled @@ -160,11 +160,12 @@ module "routing" { team_name = local.team_name name = each.key - criteria = try(each.value.criteria, null) - type = try(each.value.type, null) - notify = try(each.value.notify, null) - order = try(each.value.order, null) - priority = try(each.value.priority, null) + is_default = try(each.value.is_default, null) + criteria = try(each.value.criteria, null) + type = try(each.value.type, null) + notify = try(each.value.notify, null) + order = try(each.value.order, null) + priority = try(each.value.priority, null) # We send the map of services services = var.services @@ -178,7 +179,7 @@ module "routing" { # Allow underscores in the name regex_replace_chars = "/[^a-zA-Z0-9-_]/" - context = module.introspection.context + context = module.this.context depends_on = [ module.team, @@ -191,11 +192,11 @@ module "routing" { module "escalation" { source = "./modules/escalation" - for_each = { + for_each = local.enabled ? { for k, v in var.escalations : k => v if try(v.enabled == true, false) - } + } : {} # Only create if not reusing an existing team enabled = local.create_all_enabled @@ -210,7 +211,10 @@ module "escalation" { repeat = try(each.value.repeat, null) } - context = module.introspection.context + context = module.this.context + + team_name = local.team_name + team_naming_format = var.team_naming_format depends_on = [ module.team, diff --git a/modules/opsgenie-team/modules/escalation/README.md b/modules/opsgenie-team/modules/escalation/README.md index 298860692..d57862655 100644 --- a/modules/opsgenie-team/modules/escalation/README.md +++ b/modules/opsgenie-team/modules/escalation/README.md @@ -1,11 +1,11 @@ ## Escalation -Terraform module to configure [Opsgenie Escalation](https://registry.terraform.io/providers/opsgenie/opsgenie/latest/docs/resources/escalation) - +Terraform module to configure +[Opsgenie Escalation](https://registry.terraform.io/providers/opsgenie/opsgenie/latest/docs/resources/escalation) ## Usage -[Create Opsgenie Escalation example](../../examples/escalation) +[Create Opsgenie Escalation example](https://github.com/cloudposse/terraform-opsgenie-incident-management/tree/main/examples/escalation) ```hcl module "escalation" { @@ -27,19 +27,20 @@ module "escalation" { } ``` + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.0 | -| [opsgenie](#requirement\_opsgenie) | >= 0.4 | +| [terraform](#requirement\_terraform) | >= 1.0 | +| [opsgenie](#requirement\_opsgenie) | >= 0.6.7 | ## Providers | Name | Version | |------|---------| -| [opsgenie](#provider\_opsgenie) | >= 0.4 | +| [opsgenie](#provider\_opsgenie) | >= 0.6.7 | ## Modules @@ -78,6 +79,8 @@ module "escalation" { | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [team\_name](#input\_team\_name) | Current OpsGenie Team Name | `string` | `null` | no | +| [team\_naming\_format](#input\_team\_naming\_format) | OpsGenie Team Naming Format | `string` | `"%s_%s"` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | ## Outputs @@ -87,4 +90,4 @@ module "escalation" { | [escalation\_id](#output\_escalation\_id) | The ID of the Opsgenie Escalation | | [escalation\_name](#output\_escalation\_name) | Name of the Opsgenie Escalation | - + diff --git a/modules/opsgenie-team/modules/escalation/main.tf b/modules/opsgenie-team/modules/escalation/main.tf index 81ac73655..8083d710c 100644 --- a/modules/opsgenie-team/modules/escalation/main.tf +++ b/modules/opsgenie-team/modules/escalation/main.tf @@ -1,19 +1,20 @@ locals { - lookup_teams = distinct(flatten([ + enabled = module.this.enabled && var.escalation != null && length(var.escalation.rules) > 0 + lookup_teams = local.enabled ? distinct(flatten([ for rule in var.escalation.rules : rule.recipient.name if rule.recipient.type == "team" - ])) - lookup_users = distinct(flatten([ + ])) : [] + lookup_users = local.enabled ? distinct(flatten([ for rule in var.escalation.rules : rule.recipient.name if rule.recipient.type == "user" - ])) - lookup_schedules = distinct(flatten([ + ])) : [] + lookup_schedules = local.enabled ? distinct(flatten([ for rule in var.escalation.rules : - rule.recipient.name - if rule.recipient.type == "schedule" - ])) + format(var.team_naming_format, var.team_name, rule.recipient.name) + if rule.recipient.type == "schedule" && module.this.enabled + ])) : [] } data "opsgenie_team" "recipient" { @@ -35,7 +36,7 @@ data "opsgenie_schedule" "recipient" { resource "opsgenie_escalation" "this" { count = module.this.enabled ? 1 : 0 - name = var.escalation.name + name = format(var.team_naming_format, var.team_name, var.escalation.name) description = try(var.escalation.description, var.escalation.name) owner_team_id = try(var.escalation.owner_team_id, null) @@ -49,7 +50,7 @@ resource "opsgenie_escalation" "this" { # In spite of the docs, only one recipient can be used per escalation resource with multiple rules recipient { - id = rules.value.recipient.type == "team" ? data.opsgenie_team.recipient[rules.value.recipient.name].id : rules.value.recipient.type == "schedule" ? data.opsgenie_schedule.recipient[rules.value.recipient.name].id : data.opsgenie_user.recipient[rules.value.recipient.name].id + id = rules.value.recipient.type == "team" ? data.opsgenie_team.recipient[rules.value.recipient.name].id : rules.value.recipient.type == "schedule" ? 
data.opsgenie_schedule.recipient[format(var.team_naming_format, var.team_name, rules.value.recipient.name)].id : data.opsgenie_user.recipient[rules.value.recipient.name].id type = rules.value.recipient.type } } diff --git a/modules/opsgenie-team/modules/escalation/opsgenie.context.tf b/modules/opsgenie-team/modules/escalation/opsgenie.context.tf new file mode 100644 index 000000000..a6fa39d3b --- /dev/null +++ b/modules/opsgenie-team/modules/escalation/opsgenie.context.tf @@ -0,0 +1,11 @@ +variable "team_name" { + type = string + default = null + description = "Current OpsGenie Team Name" +} + +variable "team_naming_format" { + type = string + default = "%s_%s" + description = "OpsGenie Team Naming Format" +} diff --git a/modules/opsgenie-team/modules/escalation/variables.tf b/modules/opsgenie-team/modules/escalation/variables.tf index b6d512a0d..6db977f7e 100644 --- a/modules/opsgenie-team/modules/escalation/variables.tf +++ b/modules/opsgenie-team/modules/escalation/variables.tf @@ -2,4 +2,4 @@ variable "escalation" { default = {} type = any description = "Opsgenie Escalation configuration" -} \ No newline at end of file +} diff --git a/modules/opsgenie-team/modules/escalation/versions.tf b/modules/opsgenie-team/modules/escalation/versions.tf index 240bbd512..87be4ce66 100644 --- a/modules/opsgenie-team/modules/escalation/versions.tf +++ b/modules/opsgenie-team/modules/escalation/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 0.13.0" + required_version = ">= 1.0" required_providers { opsgenie = { source = "opsgenie/opsgenie" - version = ">= 0.4" + version = ">= 0.6.7" } } } diff --git a/modules/opsgenie-team/modules/integration/README.md b/modules/opsgenie-team/modules/integration/README.md index ec6c772d5..fe7b0a1f8 100644 --- a/modules/opsgenie-team/modules/integration/README.md +++ b/modules/opsgenie-team/modules/integration/README.md @@ -1,5 +1,8 @@ ## Integration +This module creates an OpsGenie integrations for a team. By Default, it creates a Datadog integration. + + ## Requirements @@ -20,7 +23,7 @@ |------|--------|---------| | [api\_integration](#module\_api\_integration) | cloudposse/incident-management/opsgenie//modules/api_integration | 0.16.0 | | [integration\_name](#module\_integration\_name) | cloudposse/label/null | 0.25.0 | -| [ssm\_parameter\_store](#module\_ssm\_parameter\_store) | cloudposse/ssm-parameter-store/aws | 0.10.0 | +| [ssm\_parameter\_store](#module\_ssm\_parameter\_store) | cloudposse/ssm-parameter-store/aws | 0.11.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources @@ -65,3 +68,4 @@ | [ssm\_path](#output\_ssm\_path) | Full SSM path of the team integration key | | [type](#output\_type) | Type of the team integration | + diff --git a/modules/opsgenie-team/modules/integration/main.tf b/modules/opsgenie-team/modules/integration/main.tf index bf3b65466..d45635c8c 100644 --- a/modules/opsgenie-team/modules/integration/main.tf +++ b/modules/opsgenie-team/modules/integration/main.tf @@ -225,7 +225,7 @@ resource "opsgenie_integration_action" "datadog" { # Or they can be used programmatically, if their respective Terraform provider supports it. 
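# (For example, the parent opsgenie-team component's datadog-integration.tf reads this
# key back with the aws_ssm_parameter data source to register the Datadog service object.)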
module "ssm_parameter_store" { source = "cloudposse/ssm-parameter-store/aws" - version = "0.10.0" + version = "0.11.0" # KMS key is only applied to SecureString params # https://github.com/cloudposse/terraform-aws-ssm-parameter-store/blob/master/main.tf#L17 diff --git a/modules/opsgenie-team/modules/routing/README.md b/modules/opsgenie-team/modules/routing/README.md index b12efbdfb..a69fa1d28 100644 --- a/modules/opsgenie-team/modules/routing/README.md +++ b/modules/opsgenie-team/modules/routing/README.md @@ -1,5 +1,10 @@ ## Routing +This module creates team routing rules, these are the initial rules that are applied to an alert to determine who gets +notified. This module also creates incident service rules, which determine if an alert is considered a service incident +or not. + + ## Requirements @@ -19,6 +24,7 @@ | Name | Source | Version | |------|--------|---------| | [service\_incident\_rule](#module\_service\_incident\_rule) | cloudposse/incident-management/opsgenie//modules/service_incident_rule | 0.16.0 | +| [serviceless\_incident\_rule](#module\_serviceless\_incident\_rule) | cloudposse/incident-management/opsgenie//modules/service_incident_rule | 0.16.0 | | [team\_routing\_rule](#module\_team\_routing\_rule) | cloudposse/incident-management/opsgenie//modules/team_routing_rule | 0.16.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | @@ -44,6 +50,7 @@ | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | | [incident\_properties](#input\_incident\_properties) | Properties to override on the incident routing rule | `map(any)` | n/a | yes | +| [is\_default](#input\_is\_default) | Set this alerting route as the default route | `bool` | `false` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -57,7 +64,8 @@ | [services](#input\_services) | Team services to associate with incident routing rules | `map(any)` | `null` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | -| [team\_name](#input\_team\_name) | Name of the team to assign this integration to | `string` | n/a | yes | +| [team\_name](#input\_team\_name) | Current OpsGenie Team Name | `string` | `null` | no | +| [team\_naming\_format](#input\_team\_naming\_format) | OpsGenie Team Naming Format | `string` | `"%s_%s"` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | | [time\_restriction](#input\_time\_restriction) | Time restriction of alert routing rule | `any` | `null` | no | | [timezone](#input\_timezone) | Timezone for this alerting route | `string` | `null` | no | @@ -70,3 +78,4 @@ | [service\_incident\_rule](#output\_service\_incident\_rule) | Service incident rules for incidents | | [team\_routing\_rule](#output\_team\_routing\_rule) | Team routing rules for alerts | + diff --git a/modules/opsgenie-team/modules/routing/main.tf b/modules/opsgenie-team/modules/routing/main.tf index 412d73be6..fc41bee23 100644 --- a/modules/opsgenie-team/modules/routing/main.tf +++ b/modules/opsgenie-team/modules/routing/main.tf @@ -46,7 +46,8 @@ module "team_routing_rule" { notify = [{ type = var.notify.type - id = join("", data.opsgenie_schedule.notification_schedule.*.id) + name = try(format(var.team_naming_format, var.team_name, var.notify.name), null) + id = try(join("", data.opsgenie_schedule.notification_schedule.*.id), "") }] criteria = { @@ -61,11 +62,16 @@ module "team_routing_rule" { context = module.this.context } +locals { + default_service = { for k, v in var.services : k => v if k == "default_service" } + services = { for k, v in var.services : k => v if k != "default_service" } +} + module "service_incident_rule" { source = "cloudposse/incident-management/opsgenie//modules/service_incident_rule" version = "0.16.0" - for_each = local.service_incident_rule_enabled ? var.services : {} + for_each = local.service_incident_rule_enabled ? local.services : {} service_incident_rule = { service_id = data.opsgenie_service.incident_service[each.key].id @@ -96,3 +102,38 @@ module "service_incident_rule" { context = module.this.context } + + +module "serviceless_incident_rule" { + source = "cloudposse/incident-management/opsgenie//modules/service_incident_rule" + version = "0.16.0" + + depends_on = [data.opsgenie_service.incident_service] + + for_each = local.service_incident_rule_enabled ? 
local.default_service : {} + + service_incident_rule = { + service_id = data.opsgenie_service.incident_service[each.key].id + + incident_rule = { + condition_match_type = var.criteria.type + conditions = try(var.criteria.conditions, null) + + incident_properties = { + message = try(var.incident_properties.message, "{{message}}") + tags = try(var.incident_properties.tags, []) + details = try(var.incident_properties.details, {}) + + priority = var.priority + + stakeholder_properties = { + message = try(var.incident_properties.message, "{{message}}") + description = try(var.incident_properties.description, null) + enable = try(var.incident_properties.update_stakeholders, true) + } + } + } + } + + context = module.this.context +} diff --git a/modules/opsgenie-team/modules/routing/opsgenie.context.tf b/modules/opsgenie-team/modules/routing/opsgenie.context.tf new file mode 100644 index 000000000..a6fa39d3b --- /dev/null +++ b/modules/opsgenie-team/modules/routing/opsgenie.context.tf @@ -0,0 +1,11 @@ +variable "team_name" { + type = string + default = null + description = "Current OpsGenie Team Name" +} + +variable "team_naming_format" { + type = string + default = "%s_%s" + description = "OpsGenie Team Naming Format" +} diff --git a/modules/opsgenie-team/modules/routing/variables.tf b/modules/opsgenie-team/modules/routing/variables.tf index a5c503624..3ab43c5f2 100644 --- a/modules/opsgenie-team/modules/routing/variables.tf +++ b/modules/opsgenie-team/modules/routing/variables.tf @@ -1,8 +1,3 @@ -variable "team_name" { - type = string - description = "Name of the team to assign this integration to" -} - variable "criteria" { type = object({ type = string, @@ -68,3 +63,10 @@ variable "time_restriction" { default = null description = "Time restriction of alert routing rule" } + +variable "is_default" { + type = bool + default = false + description = "Set this alerting route as the default route" + +} diff --git a/modules/opsgenie-team/opsgenie.context.tf b/modules/opsgenie-team/opsgenie.context.tf new file mode 100644 index 000000000..a6fa39d3b --- /dev/null +++ b/modules/opsgenie-team/opsgenie.context.tf @@ -0,0 +1,11 @@ +variable "team_name" { + type = string + default = null + description = "Current OpsGenie Team Name" +} + +variable "team_naming_format" { + type = string + default = "%s_%s" + description = "OpsGenie Team Naming Format" +} diff --git a/modules/opsgenie-team/outputs.tf b/modules/opsgenie-team/outputs.tf index d53b72a99..129575969 100644 --- a/modules/opsgenie-team/outputs.tf +++ b/modules/opsgenie-team/outputs.tf @@ -4,7 +4,7 @@ output "team_members" { } output "team_name" { - value = local.team_name + value = local.enabled ? local.team_name : null description = "Team Name" } @@ -14,16 +14,16 @@ output "team_id" { } output "integration" { - value = module.integration + value = local.enabled ? module.integration : null description = "Integrations created" } output "routing" { - value = module.routing + value = local.enabled ? module.routing : null description = "Routing rules created" } output "escalation" { - value = module.escalation + value = local.enabled ? 
module.escalation : null description = "Escalation rules created" } diff --git a/modules/opsgenie-team/provider-datadog.tf b/modules/opsgenie-team/provider-datadog.tf new file mode 100644 index 000000000..d70c1da5e --- /dev/null +++ b/modules/opsgenie-team/provider-datadog.tf @@ -0,0 +1,14 @@ +// This is a custom provider-datadog.tf because it is always enabled, this is because we always need the datadog provider to be configured, even if the module is disabled. + +module "datadog_configuration" { + source = "../datadog-configuration/modules/datadog_keys" + enabled = true + context = module.this.context +} + +provider "datadog" { + api_key = module.datadog_configuration.datadog_api_key + app_key = module.datadog_configuration.datadog_app_key + api_url = module.datadog_configuration.datadog_api_url + validate = "true" +} diff --git a/modules/opsgenie-team/provider-opsgenie.tf b/modules/opsgenie-team/provider-opsgenie.tf new file mode 100644 index 000000000..5b067ef80 --- /dev/null +++ b/modules/opsgenie-team/provider-opsgenie.tf @@ -0,0 +1,8 @@ +data "aws_ssm_parameter" "opsgenie_api_key" { + name = format(var.ssm_parameter_name_format, var.ssm_path, "opsgenie_api_key") + with_decryption = true +} + +provider "opsgenie" { + api_key = join("", data.aws_ssm_parameter.opsgenie_api_key[*].value) +} diff --git a/modules/opsgenie-team/providers.tf b/modules/opsgenie-team/providers.tf index 63ca6fe51..ef923e10a 100644 --- a/modules/opsgenie-team/providers.tf +++ b/modules/opsgenie-team/providers.tf @@ -1,11 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
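+    # compact() removes null/empty entries, so this yields either zero or one
+    # assume_role block, depending on whether a role ARN is configured.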
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -14,25 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} - -data "aws_ssm_parameter" "opsgenie_api_key" { - name = format(var.ssm_parameter_name_format, var.ssm_path, "opsgenie_api_key") - with_decryption = true -} - -provider "opsgenie" { - api_key = data.aws_ssm_parameter.opsgenie_api_key.value -} - diff --git a/modules/opsgenie-team/ssm.tf b/modules/opsgenie-team/ssm.tf index fd39d4571..452963f68 100644 --- a/modules/opsgenie-team/ssm.tf +++ b/modules/opsgenie-team/ssm.tf @@ -15,4 +15,3 @@ variable "ssm_path" { default = "opsgenie" description = "SSM path" } - diff --git a/modules/opsgenie-team/variables.tf b/modules/opsgenie-team/variables.tf index 2ea925d27..fffae0d5a 100644 --- a/modules/opsgenie-team/variables.tf +++ b/modules/opsgenie-team/variables.tf @@ -46,10 +46,18 @@ variable "integrations_enabled" { description = "Whether to enable the integrations submodule or not" } -variable "team" { - type = map(any) +variable "team_options" { + type = object({ + description = optional(string) + ignore_members = optional(bool, false) + delete_default_resources = optional(bool, false) + }) + description = <<-EOT + Configure the team options. + See `opsgenie_team` Terraform resource [documentation](https://registry.terraform.io/providers/opsgenie/opsgenie/latest/docs/resources/team#argument-reference) for more details. + EOT default = {} - description = "Configure the team inputs" + nullable = false } variable "escalations" { diff --git a/modules/opsgenie-team/versions.tf b/modules/opsgenie-team/versions.tf index f013c9ee4..8dfc3b0bf 100644 --- a/modules/opsgenie-team/versions.tf +++ b/modules/opsgenie-team/versions.tf @@ -1,14 +1,18 @@ terraform { - required_version = ">= 1.0.0" + required_version = ">= 1.3.0" required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.9.0" } opsgenie = { source = "opsgenie/opsgenie" version = ">= 0.6.7" } + datadog = { + source = "datadog/datadog" + version = ">= 3.3.0" + } } } diff --git a/modules/philips-labs-github-runners/README.md b/modules/philips-labs-github-runners/README.md new file mode 100644 index 000000000..9a13c7806 --- /dev/null +++ b/modules/philips-labs-github-runners/README.md @@ -0,0 +1,166 @@ +--- +tags: + - component/philips-labs-github-runners + - layer/github + - provider/aws +--- + +# Component: `philips-labs-github-runners` + +This component is responsible for provisioning the surrounding infrastructure for the github runners. + +## Prerequisites + +- Github App installed on the organization + - For more details see + [Philips Lab's Setting up a Github App](https://github.com/philips-labs/terraform-aws-github-runner/tree/main#setup-github-app-part-1) + - Ensure you create a **PRIVATE KEY** and store it in SSM, **NOT** to be confused with a **Client Secret**. Private + Keys are created in the GitHub App Configuration and scrolling to the bottom. 
+- Github App ID and private key stored in SSM under `/pl-github-runners/id` (or the value of + `var.github_app_id_ssm_path`) +- Github App Private Key stored in SSM (base64 encoded) under `/pl-github-runners/key` (or the value of + `var.github_app_key_ssm_path`) + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. + +```yaml +components: + terraform: + philips-labs-github-runners: + vars: + enabled: true +``` + +The following will create + +- An API Gateway +- Lambdas +- SQS Queue +- EC2 Launch Template instances + +The API Gateway is registered as a webhook within the GitHub app. Which scales up or down, via lambdas, the EC2 Launch +Template by the number of messages in the SQS queue. + +![Architecture](https://github.com/philips-labs/terraform-aws-github-runner/blob/main/docs/component-overview.svg) + +## Modules + +### `webhook-github-app` + +This is a fork of https://github.com/philips-labs/terraform-aws-github-runner/tree/main/modules/webhook-github-app. + +We customized it until this PR is resolved as it does not update the github app webhook until this is merged. + +- https://github.com/philips-labs/terraform-aws-github-runner/pull/3625 + +This module also requires an environment variable + +- `GH_TOKEN` - a github token be set + +This module also requires the `gh` cli to be installed. Your Dockerfile can be updated to include the following to +install it: + +```dockerfile +ARG GH_CLI_VERSION=2.39.1 +# ... +ARG GH_CLI_VERSION +RUN apt-get update && apt-get install -y --allow-downgrades \ + gh="${GH_CLI_VERSION}-*" +``` + +By default, we leave this disabled, as it requires a github token to be set. You can enable it by setting +`var.enable_update_github_app_webhook` to `true`. When enabled, it will update the github app webhook to point to the +API Gateway. This can occur if the API Gateway is deleted and recreated. + +When disabled, you will need to manually update the github app webhook to point to the API Gateway. This is output by +the component, and available via the `webhook` output under `endpoint`. 
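+As a minimal sketch (assuming the same stack layout as the Usage example above, and that a `GH_TOKEN` environment
+variable is available wherever Terraform runs), enabling the automatic webhook update might look like:
+
+```yaml
+components:
+  terraform:
+    philips-labs-github-runners:
+      vars:
+        enabled: true
+        enable_update_github_app_webhook: true
+```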
+ + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | +| [local](#requirement\_local) | >= 2.4.0 | +| [random](#requirement\_random) | >= 3.0 | + +## Providers + +| Name | Version | +|------|---------| +| [random](#provider\_random) | >= 3.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [github\_runner](#module\_github\_runner) | philips-labs/github-runner/aws | 5.4.2 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [module\_artifact](#module\_module\_artifact) | cloudposse/module-artifact/external | 0.8.0 | +| [store\_read](#module\_store\_read) | cloudposse/ssm-parameter-store/aws | 0.11.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [webhook\_github\_app](#module\_webhook\_github\_app) | philips-labs/github-runner/aws//modules/webhook-github-app | 5.4.2 | + +## Resources + +| Name | Type | +|------|------| +| [random_id.webhook_secret](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/id) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [create\_service\_linked\_role\_spot](#input\_create\_service\_linked\_role\_spot) | (optional) create the service linked role for spot instances that is required by the scale-up lambda. | `bool` | `true` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to the `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enable\_update\_github\_app\_webhook](#input\_enable\_update\_github\_app\_webhook) | Enable updating the github app webhook | `bool` | `false` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [github\_app\_id\_ssm\_path](#input\_github\_app\_id\_ssm\_path) | Path to the github app id in SSM | `string` | `"/pl-github-runners/id"` | no | +| [github\_app\_key\_ssm\_path](#input\_github\_app\_key\_ssm\_path) | Path to the github key in SSM | `string` | `"/pl-github-runners/key"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [instance\_target\_capacity\_type](#input\_instance\_target\_capacity\_type) | Default lifecycle used for runner instances, can be either `spot` or `on-demand`. | `string` | `"spot"` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS region | `string` | n/a | yes | +| [release\_version](#input\_release\_version) | Version of the application | `string` | `"v5.4.0"` | no | +| [runner\_extra\_labels](#input\_runner\_extra\_labels) | Extra (custom) labels for the runners (GitHub). Labels checks on the webhook can be enforced by setting `enable_workflow_job_labels_check`. GitHub read-only labels should not be provided. | `list(string)` |
[
"default"
]
| no | +| [scale\_up\_reserved\_concurrent\_executions](#input\_scale\_up\_reserved\_concurrent\_executions) | Amount of reserved concurrent executions for the scale-up lambda function. A value of 0 disables lambda from being triggered and -1 removes any concurrency limitations. | `number` | `-1` | no | +| [ssm\_paths](#input\_ssm\_paths) | The root path used in SSM to store configuration and secrets. |
object({
root = optional(string, "github-action-runners")
app = optional(string, "app")
runners = optional(string, "runners")
use_prefix = optional(bool, true)
})
| `{}` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [github\_runners](#output\_github\_runners) | Information about the GitHub runners. | +| [queues](#output\_queues) | Information about the GitHub runner queues. Such as `build_queue_arn` the ARN of the SQS queue to use for the build queue. | +| [ssm\_parameters](#output\_ssm\_parameters) | Information about the SSM parameters to use to register the runner. | +| [webhook](#output\_webhook) | Information about the webhook to use to register the runner. | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/ecs) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/philips-labs-github-runners/context.tf b/modules/philips-labs-github-runners/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/philips-labs-github-runners/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/philips-labs-github-runners/main.tf b/modules/philips-labs-github-runners/main.tf new file mode 100644 index 000000000..1e76d2ff1 --- /dev/null +++ b/modules/philips-labs-github-runners/main.tf @@ -0,0 +1,110 @@ +locals { + enabled = module.this.enabled + version = local.enabled ? var.release_version : null + + lambdas = local.enabled ? { + webhook = { + name = "webhook.zip" + tag = local.version + }, + runners = { + name = "runners.zip" + tag = local.version + }, + runner-binaries-syncer = { + name = "runner-binaries-syncer.zip" + tag = local.version + } + } : {} +} + +module "store_read" { + count = local.enabled ? 1 : 0 + + source = "cloudposse/ssm-parameter-store/aws" + version = "0.11.0" + + parameter_read = [ + var.github_app_key_ssm_path, + var.github_app_id_ssm_path + ] +} + +resource "random_id" "webhook_secret" { + byte_length = 20 +} + +module "module_artifact" { + for_each = local.lambdas + + source = "cloudposse/module-artifact/external" + version = "0.8.0" + + filename = each.value.name + module_name = module.this.name + url = "https://github.com/philips-labs/terraform-aws-github-runner/releases/download/${each.value.tag}/${each.key}.zip" + curl_arguments = ["-fsSL"] + + module_path = path.module + + context = module.this.context +} + +module "github_runner" { + count = local.enabled ? 1 : 0 + + source = "philips-labs/github-runner/aws" + version = "5.4.2" + + depends_on = [module.module_artifact] + + aws_region = var.region + vpc_id = module.vpc.outputs.vpc_id + subnet_ids = module.vpc.outputs.private_subnet_ids + + github_app = { + key_base64 = module.store_read[0].map[var.github_app_key_ssm_path] + id = module.store_read[0].map[var.github_app_id_ssm_path] + webhook_secret = random_id.webhook_secret.hex + } + + # here we hardcode the names of the lambda zips because they always have the same name, + # the output of the fetch lambdas module is a list of zip names, which we cannot be certain will have the same order. 
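+  # (module.module_artifact above downloads each release asset into path.module under
+  # exactly these filenames, so the hardcoded values match what was fetched.)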
+ webhook_lambda_zip = "webhook.zip" + runner_binaries_syncer_lambda_zip = "runner-binaries-syncer.zip" + runners_lambda_zip = "runners.zip" + + enable_organization_runners = true + enable_ssm_on_runners = true + ssm_paths = var.ssm_paths + instance_target_capacity_type = var.instance_target_capacity_type + create_service_linked_role_spot = var.create_service_linked_role_spot + enable_fifo_build_queue = true + scale_up_reserved_concurrent_executions = var.scale_up_reserved_concurrent_executions + + enable_user_data_debug_logging_runner = true + + # this variable is substituted in the user-data.sh startup script. It cannot point to another script if using a base ami. + # instead this will just run after the runner is installed. Hence we use `file` to read the contents of the file which is injected into the user-data.sh + userdata_post_install = file("${path.module}/templates/userdata_post_install.sh") + userdata_pre_install = file("${path.module}/templates/userdata_pre_install.sh") + + runner_extra_labels = var.runner_extra_labels + + tags = module.this.tags +} + +module "webhook_github_app" { + count = local.enabled && var.enable_update_github_app_webhook ? 1 : 0 + source = "philips-labs/github-runner/aws//modules/webhook-github-app" + version = "5.4.2" + + depends_on = [module.github_runner] + + github_app = { + key_base64 = module.store_read[0].map[var.github_app_key_ssm_path] + id = module.store_read[0].map[var.github_app_id_ssm_path] + webhook_secret = random_id.webhook_secret.hex + } + webhook_endpoint = one(module.github_runner[*].webhook.endpoint) +} diff --git a/modules/philips-labs-github-runners/outputs.tf b/modules/philips-labs-github-runners/outputs.tf new file mode 100644 index 000000000..d81800398 --- /dev/null +++ b/modules/philips-labs-github-runners/outputs.tf @@ -0,0 +1,19 @@ +output "webhook" { + description = "Information about the webhook to use to register the runner." + value = one(module.github_runner[*].webhook) +} + +output "ssm_parameters" { + description = "Information about the SSM parameters to use to register the runner." + value = one(module.github_runner[*].ssm_parameters) +} + +output "github_runners" { + description = "Information about the GitHub runners." + value = one(module.github_runner[*].runners) +} + +output "queues" { + description = "Information about the GitHub runner queues. Such as `build_queue_arn` the ARN of the SQS queue to use for the build queue." + value = one(module.github_runner[*].queues) +} diff --git a/modules/philips-labs-github-runners/providers.tf b/modules/philips-labs-github-runners/providers.tf new file mode 100644 index 000000000..54257fd20 --- /dev/null +++ b/modules/philips-labs-github-runners/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = module.iam_roles.terraform_role_arn + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/philips-labs-github-runners/remote-state.tf b/modules/philips-labs-github-runners/remote-state.tf new file mode 100644 index 000000000..757ef9067 --- /dev/null +++ b/modules/philips-labs-github-runners/remote-state.tf @@ -0,0 +1,8 @@ +module "vpc" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "vpc" + + context = module.this.context +} diff --git a/modules/philips-labs-github-runners/templates/userdata_post_install.sh b/modules/philips-labs-github-runners/templates/userdata_post_install.sh new file mode 100644 index 000000000..6150351c1 --- /dev/null +++ b/modules/philips-labs-github-runners/templates/userdata_post_install.sh @@ -0,0 +1,19 @@ + +echo "Installing Custom Packages..." +yum install -y make + +# Install AWS CLI +curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" +unzip awscliv2.zip +sudo ./aws/install + +# Install `gh` CLI +type -p yum-config-manager >/dev/null || sudo yum install -y yum-utils +sudo yum-config-manager --add-repo https://cli.github.com/packages/rpm/gh-cli.repo +sudo yum install -y gh + +# Install nodejs +sudo yum install -y nodejs-1:18.18.2-1.amzn2023.0.1 + +# Install terraform-docs +curl -L "$(curl -s https://api.github.com/repos/terraform-docs/terraform-docs/releases/latest | grep -o -E -m 1 "https://.+?-linux-amd64.tar.gz")" > terraform-docs.tgz && tar -xzf terraform-docs.tgz terraform-docs && rm terraform-docs.tgz && chmod +x terraform-docs && sudo mv terraform-docs /usr/bin/ diff --git a/modules/philips-labs-github-runners/templates/userdata_pre_install.sh b/modules/philips-labs-github-runners/templates/userdata_pre_install.sh new file mode 100644 index 000000000..96bd32681 --- /dev/null +++ b/modules/philips-labs-github-runners/templates/userdata_pre_install.sh @@ -0,0 +1,8 @@ +# From https://github.com/aws-observability/aws-otel-test-framework/pull/1425/files +## Fixes: Error loading Python lib '/tmp/_MEIaR70C0/libpython3.7m.so.1.0': dlopen: libcrypt.so.1: cannot open shared object file: No such file or directory + +echo "Custom Pre-Install Script" +sudo yum update -y +sudo yum install -y libxcrypt-compat +sudo yum install -y docker +sudo ln -s /usr/lib/libcrypt.so /usr/lib/libcrypt.so.1 diff --git a/modules/philips-labs-github-runners/variables.tf b/modules/philips-labs-github-runners/variables.tf new file mode 100644 index 000000000..9eef9607d --- /dev/null +++ b/modules/philips-labs-github-runners/variables.tf @@ -0,0 +1,69 @@ +variable "region" { + type = string + description = "AWS region" +} + +variable "enable_update_github_app_webhook" { + type = bool + description = "Enable updating the github app webhook" + default = false +} + +variable "release_version" { + type = string + description = "Version of the application" + default = "v5.4.0" +} + +variable "github_app_key_ssm_path" { + type = string + description = "Path to the github key in SSM" + default = "/pl-github-runners/key" +} + +variable "github_app_id_ssm_path" { + type = string + description = "Path to the github app id in SSM" + default = "/pl-github-runners/id" +} + +variable "runner_extra_labels" { + description = "Extra (custom) labels for the runners (GitHub). 
Labels checks on the webhook can be enforced by setting `enable_workflow_job_labels_check`. GitHub read-only labels should not be provided." + type = list(string) + default = ["default"] +} + +variable "scale_up_reserved_concurrent_executions" { + description = "Amount of reserved concurrent executions for the scale-up lambda function. A value of 0 disables lambda from being triggered and -1 removes any concurrency limitations." + type = number + # default from philips labs is 1, which gives an error when creating the lambda Specified ReservedConcurrentExecutions for function decreases account's UnreservedConcurrentExecution below its minimum value of [10] + # https://github.com/philips-labs/terraform-aws-github-runner/issues/1671 + default = -1 +} + +variable "instance_target_capacity_type" { + description = "Default lifecycle used for runner instances, can be either `spot` or `on-demand`." + type = string + default = "spot" + validation { + condition = contains(["spot", "on-demand"], var.instance_target_capacity_type) + error_message = "The instance target capacity should be either spot or on-demand." + } +} + +variable "create_service_linked_role_spot" { + description = "(optional) create the service linked role for spot instances that is required by the scale-up lambda." + type = bool + default = true +} + +variable "ssm_paths" { + description = "The root path used in SSM to store configuration and secrets." + type = object({ + root = optional(string, "github-action-runners") + app = optional(string, "app") + runners = optional(string, "runners") + use_prefix = optional(bool, true) + }) + default = {} +} diff --git a/modules/aurora-postgres/modules/postgresql-user/versions.tf b/modules/philips-labs-github-runners/versions.tf similarity index 65% rename from modules/aurora-postgres/modules/postgresql-user/versions.tf rename to modules/philips-labs-github-runners/versions.tf index ecb1ce1aa..cc5d839f3 100644 --- a/modules/aurora-postgres/modules/postgresql-user/versions.tf +++ b/modules/philips-labs-github-runners/versions.tf @@ -4,15 +4,15 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.0" + version = ">= 4.9.0" + } + local = { + source = "hashicorp/local" + version = ">= 2.4.0" } random = { source = "hashicorp/random" - version = ">= 2.3" - } - postgresql = { - source = "cyrilgdn/postgresql" - version = ">= 1.14.0" + version = ">= 3.0" } } } diff --git a/modules/rds/README.md b/modules/rds/README.md index dd892a694..841a441c4 100644 --- a/modules/rds/README.md +++ b/modules/rds/README.md @@ -1,11 +1,20 @@ +--- +tags: + - component/rds + - layer/data + - provider/aws +--- + # Component: `rds` -This component is responsible for provisioning an RDS instance. It seeds relevant database information (hostnames, username, password, etc.) into AWS SSM Parameter Store. +This component is responsible for provisioning an RDS instance. It seeds relevant database information (hostnames, +username, password, etc.) into AWS SSM Parameter Store. ## Security Groups Guidance: -By default this component creates a client security group and adds that security group id to the default attached security group. -Ideally other AWS resources that require RDS access can be granted this client security group. Additionally you can grant access -via specific CIDR blocks or security group ids. + +By default this component creates a client security group and adds that security group id to the default attached +security group. 
Ideally other AWS resources that require RDS access can be granted this client security group. +Additionally you can grant access via specific CIDR blocks or security group ids. ## Usage @@ -13,6 +22,35 @@ via specific CIDR blocks or security group ids. Here's an example snippet for how to use this component. +### PostgreSQL + +```yaml +components: + terraform: + rds/defaults: + metadata: + type: abstract + vars: + enabled: true + use_fullname: false + name: my-postgres-db + instance_class: db.t3.micro + database_name: my-postgres-db + # database_user: admin # enable to specify something specific + engine: postgres + engine_version: "15.2" + database_port: 5432 + db_parameter_group: "postgres15" + allocated_storage: 10 #GBs + ssm_enabled: true + client_security_group_enabled: true + ## The following settings allow the database to be accessed from anywhere + # publicly_accessible: true + # use_private_subnets: false + # allowed_cidr_blocks: + # - 0.0.0.0/0 +``` + ### Microsoft SQL ```yaml @@ -40,53 +78,58 @@ components: # This does not seem to work correctly deletion_protection: false ``` + ### Provisioning from a snapshot -The snapshot identifier variable can be added to provision an instance from a snapshot HOWEVER- -Keep in mind these instances are provisioned from a unique kms key per rds. -For clean terraform runs, you must first provision the key for the destination instance, then copy the snapshot using that kms key. + +The snapshot identifier variable can be added to provision an instance from a snapshot HOWEVER- Keep in mind these +instances are provisioned from a unique kms key per rds. For clean terraform runs, you must first provision the key for +the destination instance, then copy the snapshot using that kms key. Example - I want a new instance `rds-example-new` to be provisioned from a snapshot of `rds-example-old`: + 1. Use the console to manually make a snapshot of rds instance `rds-example-old` 1. provision the kms key for `rds-example-new` - ``` - atmos terraform plan rds-example-new -s ue1-staging '-target=module.kms_key_rds.aws_kms_key.default[0]' - atmos terraform apply rds-example-new -s ue1-staging '-target=module.kms_key_rds.aws_kms_key.default[0]' - ``` + ``` + atmos terraform plan rds-example-new -s ue1-staging '-target=module.kms_key_rds.aws_kms_key.default[0]' + atmos terraform apply rds-example-new -s ue1-staging '-target=module.kms_key_rds.aws_kms_key.default[0]' + ``` 1. Use the console to copy the snapshot to a new name using the above provisioned kms key -1. Add `snapshot_identifier` variable to `rds-example-new` catalog and specify the newly copied snapshot that used the above key +1. Add `snapshot_identifier` variable to `rds-example-new` catalog and specify the newly copied snapshot that used the + above key 1. 
Post provisioning, remove the `snapshot_idenfier` variable and verify terraform runs clean for the copied instance + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | | [random](#requirement\_random) | >= 2.3 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | +| [aws](#provider\_aws) | >= 4.0 | | [random](#provider\_random) | >= 2.3 | ## Modules | Name | Source | Version | |------|--------|---------| -| [dns\_gbl\_delegated](#module\_dns\_gbl\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.3 | -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.3 | +| [dns\_gbl\_delegated](#module\_dns\_gbl\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [kms\_key\_rds](#module\_kms\_key\_rds) | cloudposse/kms-key/aws | 0.10.0 | -| [rds\_client\_sg](#module\_rds\_client\_sg) | cloudposse/security-group/aws | 0.3.1 | -| [rds\_instance](#module\_rds\_instance) | cloudposse/rds/aws | 0.38.5 | -| [rds\_monitoring\_role](#module\_rds\_monitoring\_role) | cloudposse/iam-role/aws | 0.16.2 | +| [kms\_key\_rds](#module\_kms\_key\_rds) | cloudposse/kms-key/aws | 0.12.1 | +| [rds\_client\_sg](#module\_rds\_client\_sg) | cloudposse/security-group/aws | 2.2.0 | +| [rds\_instance](#module\_rds\_instance) | cloudposse/rds/aws | 1.1.0 | +| [rds\_monitoring\_role](#module\_rds\_monitoring\_role) | cloudposse/iam-role/aws | 0.17.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.3 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -143,8 +186,6 @@ Example - I want a new instance `rds-example-new` to be provisioned from a snaps | [host\_name](#input\_host\_name) | The DB host name created in Route53 | `string` | `"db"` | no | | [iam\_database\_authentication\_enabled](#input\_iam\_database\_authentication\_enabled) | Specifies whether or mappings of AWS Identity and Access Management (IAM) accounts to database accounts is enabled | `bool` | `false` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [instance\_class](#input\_instance\_class) | Class of RDS instance | `string` | n/a | yes | | [iops](#input\_iops) | The amount of provisioned IOPS. Setting this implies a storage\_type of 'io1'. Default is 0 if rds storage type is not 'io1' | `number` | `0` | no | | [kms\_alias\_name\_ssm](#input\_kms\_alias\_name\_ssm) | KMS alias name for SSM | `string` | `"alias/aws/ssm"` | no | @@ -183,17 +224,22 @@ Example - I want a new instance `rds-example-new` to be provisioned from a snaps | [ssm\_key\_user](#input\_ssm\_key\_user) | The SSM key to save the user. See `var.ssm_path_format`. | `string` | `"admin/db_user"` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [storage\_encrypted](#input\_storage\_encrypted) | (Optional) Specifies whether the DB instance is encrypted. The default is false if not specified | `bool` | `true` | no | +| [storage\_throughput](#input\_storage\_throughput) | The storage throughput value for the DB instance. Can only be set when `storage_type` is `gp3`. Cannot be specified if the `allocated_storage` value is below a per-engine threshold. | `number` | `null` | no | | [storage\_type](#input\_storage\_type) | One of 'standard' (magnetic), 'gp2' (general purpose SSD), or 'io1' (provisioned IOPS SSD) | `string` | `"standard"` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | | [timezone](#input\_timezone) | Time zone of the DB instance. timezone is currently only supported by Microsoft SQL Server. The timezone can only be set on creation. See [MSSQL User Guide](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone) for more information. | `string` | `null` | no | +| [use\_dns\_delegated](#input\_use\_dns\_delegated) | Use the dns-delegated dns\_zone\_id | `bool` | `false` | no | | [use\_eks\_security\_group](#input\_use\_eks\_security\_group) | Use the eks default security group | `bool` | `false` | no | +| [use\_private\_subnets](#input\_use\_private\_subnets) | Use private subnets | `bool` | `true` | no | ## Outputs | Name | Description | |------|-------------| | [exports](#output\_exports) | Map of exports for use in deployment configuration templates | +| [kms\_key\_alias](#output\_kms\_key\_alias) | The KMS key alias | +| [psql\_helper](#output\_psql\_helper) | A helper output to use with psql for connecting to this RDS instance. | | [rds\_address](#output\_rds\_address) | Address of the instance | | [rds\_arn](#output\_rds\_arn) | ARN of the instance | | [rds\_database\_ssm\_key\_prefix](#output\_rds\_database\_ssm\_key\_prefix) | SSM prefix | @@ -208,10 +254,11 @@ Example - I want a new instance `rds-example-new` to be provisioned from a snaps | [rds\_security\_group\_id](#output\_rds\_security\_group\_id) | ID of the Security Group | | [rds\_subnet\_group\_id](#output\_rds\_subnet\_group\_id) | ID of the created Subnet Group | - + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/rds) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/rds) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/rds/default.auto.tfvars b/modules/rds/default.auto.tfvars deleted file mode 100644 index 47f94fb9b..000000000 --- a/modules/rds/default.auto.tfvars +++ /dev/null @@ -1 +0,0 @@ -enabled = false diff --git a/modules/rds/kms.tf b/modules/rds/kms.tf index 4aef680ca..9fdc89244 100644 --- a/modules/rds/kms.tf +++ b/modules/rds/kms.tf @@ -1,6 +1,6 @@ module "kms_key_rds" { source = "cloudposse/kms-key/aws" - version = "0.10.0" + version = "0.12.1" description = "KMS key for RDS" deletion_window_in_days = 10 diff --git a/modules/rds/main.tf b/modules/rds/main.tf index 31bceff7d..6843f89fc 100644 --- a/modules/rds/main.tf +++ b/modules/rds/main.tf @@ -2,10 +2,10 @@ locals { enabled = module.this.enabled vpc_id = module.vpc.outputs.vpc_id - subnet_ids = module.vpc.outputs.private_subnet_ids + subnet_ids = var.use_private_subnets ? module.vpc.outputs.private_subnet_ids : module.vpc.outputs.public_subnet_ids eks_security_groups = var.use_eks_security_group ? 
[module.eks[0].outputs.eks_cluster_managed_security_group_id] : [] - dns_zone_id = module.dns_gbl_delegated.outputs.default_dns_zone_id + dns_zone_id = one(module.dns_gbl_delegated[*].outputs.default_dns_zone_id) create_user = local.enabled && length(var.database_user) == 0 create_password = local.enabled && length(var.database_password) == 0 @@ -18,11 +18,13 @@ locals { local.eks_security_groups, var.security_group_ids ) + + psql_access_enabled = local.enabled && (var.engine == "postgres") } module "rds_client_sg" { source = "cloudposse/security-group/aws" - version = "0.3.1" + version = "2.2.0" name = "${module.this.name}-client" enabled = module.this.enabled && var.client_security_group_enabled @@ -35,7 +37,7 @@ module "rds_client_sg" { module "rds_instance" { source = "cloudposse/rds/aws" - version = "0.38.5" + version = "1.1.0" allocated_storage = var.allocated_storage allow_major_version_upgrade = var.allow_major_version_upgrade @@ -58,7 +60,7 @@ module "rds_instance" { db_parameter_group = var.db_parameter_group db_subnet_group_name = var.db_subnet_group_name deletion_protection = var.deletion_protection - dns_zone_id = local.dns_zone_id + dns_zone_id = local.dns_zone_id != null ? local.dns_zone_id : "" enabled_cloudwatch_logs_exports = var.enabled_cloudwatch_logs_exports engine = var.engine engine_version = var.engine_version @@ -73,7 +75,7 @@ module "rds_instance" { major_engine_version = var.major_engine_version max_allocated_storage = var.max_allocated_storage monitoring_interval = var.monitoring_interval - monitoring_role_arn = var.monitoring_interval != "0" ? module.rds_monitoring_role.arn : var.monitoring_role_arn + monitoring_role_arn = var.monitoring_interval != "0" ? module.rds_monitoring_role[0].arn : var.monitoring_role_arn multi_az = var.multi_az option_group_name = var.option_group_name parameter_group_name = var.parameter_group_name @@ -86,6 +88,7 @@ module "rds_instance" { skip_final_snapshot = var.skip_final_snapshot snapshot_identifier = var.snapshot_identifier storage_encrypted = var.storage_encrypted + storage_throughput = var.storage_throughput storage_type = var.storage_type subnet_ids = local.subnet_ids timezone = var.timezone @@ -125,7 +128,9 @@ resource "random_password" "database_password" { module "rds_monitoring_role" { source = "cloudposse/iam-role/aws" - version = "0.16.2" + version = "0.17.0" + + count = var.monitoring_interval != "0" ? 
1 : 0 name = "${module.this.name}-rds-enhanced-monitoring-role" enabled = module.this.enabled && var.monitoring_interval != 0 diff --git a/modules/rds/outputs.tf b/modules/rds/outputs.tf index 3dee17fa4..72e21bb88 100644 --- a/modules/rds/outputs.tf +++ b/modules/rds/outputs.tf @@ -1,3 +1,15 @@ +locals { + ssm_path_as_list = split("/", local.rds_database_password_path) + ssm_path_app = trim(join("/", slice(local.ssm_path_as_list, 0, length(local.ssm_path_as_list) - 1)), "/") + ssm_path_password_value = element(local.ssm_path_as_list, length(local.ssm_path_as_list) - 1) + psql_message = < +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | ~> 4.0 | +| [random](#requirement\_random) | ~> 3.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | ~> 4.0 | +| [random](#provider\_random) | ~> 3.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [redshift\_sg](#module\_redshift\_sg) | cloudposse/security-group/aws | 2.0.0-rc1 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.4.1 | + +## Resources + +| Name | Type | +|------|------| +| [aws_redshiftserverless_endpoint_access.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/redshiftserverless_endpoint_access) | resource | +| [aws_redshiftserverless_namespace.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/redshiftserverless_namespace) | resource | +| [aws_redshiftserverless_workgroup.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/redshiftserverless_workgroup) | resource | +| [aws_ssm_parameter.admin_password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource | +| [aws_ssm_parameter.admin_user](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource | +| [aws_ssm_parameter.endpoint](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource | +| [random_password.admin_password](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/password) | resource | +| [random_pet.admin_user](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/pet) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [admin\_password](#input\_admin\_password) | Password for the master DB user. Required unless a snapshot\_identifier is provided | `string` | `null` | no | +| [admin\_user](#input\_admin\_user) | Username for the master DB user. Required unless a snapshot\_identifier is provided | `string` | `null` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [base\_capacity](#input\_base\_capacity) | The base data warehouse capacity of the workgroup in Redshift Processing Units (RPUs). | `number` | `128` | no | +| [config\_parameter](#input\_config\_parameter) | A list of Redshift config parameters to apply to the workgroup. |
list(object({
parameter_key = string
parameter_value = any
}))
| `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [custom\_sg\_allow\_all\_egress](#input\_custom\_sg\_allow\_all\_egress) | Whether to allow all egress traffic or not | `bool` | `true` | no | +| [custom\_sg\_enabled](#input\_custom\_sg\_enabled) | Whether to use custom security group or not | `bool` | `false` | no | +| [custom\_sg\_rules](#input\_custom\_sg\_rules) | n/a |
list(object({
key = string
type = string
from_port = number
to_port = number
protocol = string
cidr_blocks = list(string)
description = string
}))
| `[]` | no | +| [database\_name](#input\_database\_name) | The name of the first database to be created when the cluster is created | `string` | `null` | no | +| [default\_iam\_role\_arn](#input\_default\_iam\_role\_arn) | The Amazon Resource Name (ARN) of the IAM role to set as a default in the namespace | `string` | `null` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [endpoint\_name](#input\_endpoint\_name) | Endpoint name for the redshift endpoint, if null, is set to $stage-$name | `string` | `null` | no | +| [enhanced\_vpc\_routing](#input\_enhanced\_vpc\_routing) | The value that specifies whether to turn on enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC instead of over the internet. | `bool` | `true` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [iam\_roles](#input\_iam\_roles) | A list of IAM roles to associate with the namespace. | `list(string)` | `[]` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | +| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | +| [kms\_alias\_name\_ssm](#input\_kms\_alias\_name\_ssm) | KMS alias name for SSM | `string` | `"alias/aws/ssm"` | no | +| [kms\_key\_id](#input\_kms\_key\_id) | The ARN of the Amazon Web Services Key Management Service key used to encrypt your data. | `string` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [log\_exports](#input\_log\_exports) | The types of logs the namespace can export. Available export types are `userlog`, `connectionlog`, and `useractivitylog`. | `set(string)` | `[]` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [publicly\_accessible](#input\_publicly\_accessible) | If true, the cluster can be accessed from a public network | `bool` | `false` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS region | `string` | n/a | yes | +| [security\_group\_ids](#input\_security\_group\_ids) | An array of security group IDs to associate with the endpoint. | `list(string)` | `null` | no | +| [ssm\_path\_prefix](#input\_ssm\_path\_prefix) | SSM path prefix (without leading or trailing slash) | `string` | `"redshift"` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [use\_private\_subnets](#input\_use\_private\_subnets) | Whether to use private or public subnets for the Redshift cluster | `bool` | `true` | no | +| [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | An array of security group IDs to associate with the workgroup. | `list(string)` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [endpoint\_address](#output\_endpoint\_address) | The DNS address of the VPC endpoint. | +| [endpoint\_arn](#output\_endpoint\_arn) | Amazon Resource Name (ARN) of the Redshift Serverless Endpoint Access. | +| [endpoint\_id](#output\_endpoint\_id) | The Redshift Endpoint Access Name. | +| [endpoint\_name](#output\_endpoint\_name) | Endpoint Name. | +| [endpoint\_port](#output\_endpoint\_port) | The port that Amazon Redshift Serverless listens on. | +| [endpoint\_subnet\_ids](#output\_endpoint\_subnet\_ids) | Subnets used in redshift serverless endpoint. | +| [endpoint\_vpc\_endpoint](#output\_endpoint\_vpc\_endpoint) | The VPC endpoint or the Redshift Serverless workgroup. See VPC Endpoint below. | +| [namespace\_arn](#output\_namespace\_arn) | Amazon Resource Name (ARN) of the Redshift Serverless Namespace. | +| [namespace\_id](#output\_namespace\_id) | The Redshift Namespace Name. | +| [namespace\_namespace\_id](#output\_namespace\_namespace\_id) | The Redshift Namespace ID. | +| [workgroup\_arn](#output\_workgroup\_arn) | Amazon Resource Name (ARN) of the Redshift Serverless Workgroup. | +| [workgroup\_endpoint](#output\_workgroup\_endpoint) | The Redshift Serverless Endpoint. | +| [workgroup\_id](#output\_workgroup\_id) | The Redshift Workgroup Name. | +| [workgroup\_workgroup\_id](#output\_workgroup\_workgroup\_id) | The Redshift Workgroup ID. | + + + +## References + * [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/redshift) - Cloud Posse's upstream component + + +[](https://cpco.io/component) diff --git a/modules/redshift-serverless/context.tf b/modules/redshift-serverless/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/redshift-serverless/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/redshift-serverless/main.tf b/modules/redshift-serverless/main.tf new file mode 100644 index 000000000..168b40eaf --- /dev/null +++ b/modules/redshift-serverless/main.tf @@ -0,0 +1,109 @@ + +locals { + enabled = module.this.enabled + subnet_ids = var.use_private_subnets ? module.vpc.outputs.private_subnet_ids : module.vpc.outputs.public_subnet_ids + admin_user = var.admin_user != null && var.admin_user != "" ? var.admin_user : join("", random_pet.admin_user.*.id) + admin_password = var.admin_password != null && var.admin_password != "" ? var.admin_password : join("", random_password.admin_password.*.result) +} + +resource "random_pet" "admin_user" { + count = local.enabled && (var.admin_user == null || var.admin_user == "") ? 1 : 0 + + length = 2 + separator = "_" + + keepers = { + db_name = var.database_name + } +} + +resource "random_password" "admin_password" { + count = local.enabled && (var.admin_password == null || var.admin_password == "") ? 1 : 0 + + length = 33 + # Leave special characters out to avoid quoting and other issues. + # Special characters have no additional security compared to increasing length. + special = false + override_special = "!#$%^&*()<>-_" + + keepers = { + db_name = var.database_name + } +} + +module "redshift_sg" { + count = local.enabled && var.custom_sg_enabled ? 1 : 0 + + source = "cloudposse/security-group/aws" + version = "2.0.0-rc1" + + create_before_destroy = true + preserve_security_group_id = true + + attributes = ["redshift"] + + # Allow unlimited egress + allow_all_egress = var.custom_sg_allow_all_egress + + rules = var.custom_sg_rules + + vpc_id = module.vpc.outputs.vpc_id + + context = module.this.context +} + + +resource "aws_redshiftserverless_workgroup" "default" { + count = local.enabled ? 
1 : 0 + + namespace_name = aws_redshiftserverless_namespace.default[0].namespace_name + + depends_on = [ + aws_redshiftserverless_namespace.default[0] + ] + + workgroup_name = module.this.id + + base_capacity = var.base_capacity + enhanced_vpc_routing = var.enhanced_vpc_routing + publicly_accessible = var.publicly_accessible + security_group_ids = coalesce(var.security_group_ids, module.redshift_sg[*].id, []) + subnet_ids = local.subnet_ids + + dynamic "config_parameter" { + for_each = var.config_parameter + content { + parameter_key = config_parameter.key + parameter_value = config_parameter.value + } + } + tags = module.this.tags + +} + +resource "aws_redshiftserverless_namespace" "default" { + count = local.enabled ? 1 : 0 + + namespace_name = module.this.id + + admin_user_password = local.admin_password + admin_username = local.admin_user + db_name = var.database_name + default_iam_role_arn = var.default_iam_role_arn + iam_roles = var.iam_roles + kms_key_id = var.kms_key_id + log_exports = var.log_exports + + tags = var.tags +} + + +resource "aws_redshiftserverless_endpoint_access" "default" { + count = local.enabled ? 1 : 0 + + workgroup_name = aws_redshiftserverless_workgroup.default[0].workgroup_name + + endpoint_name = var.endpoint_name == null ? format("%s-%s", module.this.stage, module.this.name) : var.endpoint_name + subnet_ids = local.subnet_ids + vpc_security_group_ids = var.vpc_security_group_ids != null ? var.vpc_security_group_ids : [module.vpc.outputs.vpc_default_security_group_id] +} diff --git a/modules/redshift-serverless/outputs.tf b/modules/redshift-serverless/outputs.tf new file mode 100644 index 000000000..15d07032d --- /dev/null +++ b/modules/redshift-serverless/outputs.tf @@ -0,0 +1,70 @@ +output "endpoint_arn" { + description = "Amazon Resource Name (ARN) of the Redshift Serverless Endpoint Access." + value = join("", aws_redshiftserverless_endpoint_access.default[*].arn) +} + +output "endpoint_id" { + description = "The Redshift Endpoint Access Name." + value = join("", aws_redshiftserverless_endpoint_access.default[*].id) +} + +output "endpoint_address" { + description = "The DNS address of the VPC endpoint." + value = join("", aws_redshiftserverless_endpoint_access.default[*].address) +} + +output "endpoint_port" { + description = "The port that Amazon Redshift Serverless listens on." + value = join("", aws_redshiftserverless_endpoint_access.default[*].port) +} + +output "endpoint_vpc_endpoint" { + description = "The VPC endpoint or the Redshift Serverless workgroup. See VPC Endpoint below." + value = aws_redshiftserverless_endpoint_access.default[0].vpc_endpoint + # value = join("", aws_redshiftserverless_endpoint_access.default[*].vpc_endpoint) +} + +output "endpoint_name" { + description = "Endpoint Name." + value = join("", aws_redshiftserverless_endpoint_access.default[*].endpoint_name) +} + +output "endpoint_subnet_ids" { + description = "Subnets used in redshift serverless endpoint." + value = aws_redshiftserverless_endpoint_access.default[0].subnet_ids +} + +output "namespace_arn" { + description = "Amazon Resource Name (ARN) of the Redshift Serverless Namespace." + value = join("", aws_redshiftserverless_namespace.default[*].arn) +} + +output "namespace_id" { + description = "The Redshift Namespace Name." + value = join("", aws_redshiftserverless_namespace.default[*].id) +} + +output "namespace_namespace_id" { + description = "The Redshift Namespace ID." 
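+  # join() over the splat collapses the list to a single string, and to "" when the namespace is disabled (count = 0).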
+ value = join("", aws_redshiftserverless_namespace.default[*].namespace_id) +} + +output "workgroup_arn" { + description = "Amazon Resource Name (ARN) of the Redshift Serverless Workgroup." + value = join("", aws_redshiftserverless_workgroup.default[*].arn) +} + +output "workgroup_id" { + description = "The Redshift Workgroup Name." + value = join("", aws_redshiftserverless_workgroup.default[*].id) +} + +output "workgroup_workgroup_id" { + description = "The Redshift Workgroup ID." + value = join("", aws_redshiftserverless_workgroup.default[*].workgroup_id) +} + +output "workgroup_endpoint" { + description = "The Redshift Serverless Endpoint." + value = aws_redshiftserverless_workgroup.default[0].endpoint +} diff --git a/modules/datadog-agent/providers.tf b/modules/redshift-serverless/providers.tf similarity index 74% rename from modules/datadog-agent/providers.tf rename to modules/redshift-serverless/providers.tf index 79558d342..08ee01b2a 100644 --- a/modules/datadog-agent/providers.tf +++ b/modules/redshift-serverless/providers.tf @@ -2,6 +2,7 @@ provider "aws" { region = var.region profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + dynamic "assume_role" { for_each = module.iam_roles.profiles_enabled ? [] : ["role"] content { @@ -26,18 +27,3 @@ variable "import_role_arn" { default = null description = "IAM Role ARN to use when importing a resource" } - -data "aws_eks_cluster" "kubernetes" { - count = local.enabled ? 1 : 0 - - name = module.eks.outputs.eks_cluster_id -} - -data "aws_eks_cluster_auth" "kubernetes" { - count = local.enabled ? 1 : 0 - - name = module.eks.outputs.eks_cluster_id -} - -provider "utils" {} - diff --git a/modules/redshift-serverless/remote-state.tf b/modules/redshift-serverless/remote-state.tf new file mode 100644 index 000000000..3e0ccd51e --- /dev/null +++ b/modules/redshift-serverless/remote-state.tf @@ -0,0 +1,8 @@ +module "vpc" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.4.1" + + component = "vpc" + + context = module.this.context +} diff --git a/modules/redshift-serverless/ssm.tf b/modules/redshift-serverless/ssm.tf new file mode 100644 index 000000000..5a4051d70 --- /dev/null +++ b/modules/redshift-serverless/ssm.tf @@ -0,0 +1,30 @@ +resource "aws_ssm_parameter" "admin_user" { + count = local.enabled ? 1 : 0 + + name = format("/%s/%s", var.ssm_path_prefix, "admin_user") + value = local.admin_user + description = "Redshift cluster admin username" + type = "String" + overwrite = true +} + +resource "aws_ssm_parameter" "admin_password" { + count = local.enabled ? 1 : 0 + + name = format("/%s/%s", var.ssm_path_prefix, "admin_password") + value = local.admin_password + description = "Redshift cluster admin password" + type = "SecureString" + key_id = var.kms_alias_name_ssm + overwrite = true +} + +resource "aws_ssm_parameter" "endpoint" { + count = local.enabled ? 
1 : 0 + + name = format("/%s/%s", var.ssm_path_prefix, "endpoint") + value = aws_redshiftserverless_workgroup.default[0].endpoint[0].address + description = "Redshift endpoint address" + type = "String" + overwrite = true +} diff --git a/modules/redshift-serverless/variables.tf b/modules/redshift-serverless/variables.tf new file mode 100644 index 000000000..e33765419 --- /dev/null +++ b/modules/redshift-serverless/variables.tf @@ -0,0 +1,136 @@ +variable "region" { + type = string + description = "AWS region" +} + +variable "admin_user" { + type = string + default = null + description = "Username for the master DB user. Required unless a snapshot_identifier is provided" +} + +variable "admin_password" { + type = string + default = null + description = "Password for the master DB user. Required unless a snapshot_identifier is provided" +} + +variable "database_name" { + type = string + default = null + description = "The name of the first database to be created when the cluster is created" +} + +variable "default_iam_role_arn" { + type = string + default = null + description = "The Amazon Resource Name (ARN) of the IAM role to set as a default in the namespace" +} + +variable "iam_roles" { + type = list(string) + default = [] + description = "A list of IAM roles to associate with the namespace." +} + +variable "kms_key_id" { + type = string + default = null + description = "The ARN of the Amazon Web Services Key Management Service key used to encrypt your data." +} + +variable "log_exports" { + type = set(string) + default = [] + description = "The types of logs the namespace can export. Available export types are `userlog`, `connectionlog`, and `useractivitylog`." +} + +variable "use_private_subnets" { + type = bool + default = true + description = "Whether to use private or public subnets for the Redshift cluster" +} + +variable "publicly_accessible" { + type = bool + default = false + description = "If true, the cluster can be accessed from a public network" +} + +// AWS KMS alias used for encryption/decryption of SSM secure strings +variable "kms_alias_name_ssm" { + type = string + default = "alias/aws/ssm" + description = "KMS alias name for SSM" +} + +variable "ssm_path_prefix" { + type = string + default = "redshift" + description = "SSM path prefix (without leading or trailing slash)" +} + +variable "security_group_ids" { + type = list(string) + default = null + description = "An array of security group IDs to associate with the endpoint." +} + +variable "vpc_security_group_ids" { + type = list(string) + default = null + description = "An array of security group IDs to associate with the workgroup." +} + +variable "base_capacity" { + type = number + default = 128 + description = "The base data warehouse capacity of the workgroup in Redshift Processing Units (RPUs)." +} + +variable "config_parameter" { + type = list(object({ + parameter_key = string + parameter_value = any + })) + default = [] + description = "A list of Redshift config parameters to apply to the workgroup." +} + +variable "enhanced_vpc_routing" { + type = bool + default = true + description = "The value that specifies whether to turn on enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC instead of over the internet." 
+} + +variable "endpoint_name" { + type = string + default = null + description = "Endpoint name for the redshift endpoint, if null, is set to $stage-$name" +} + +variable "custom_sg_enabled" { + type = bool + default = false + description = "Whether to use custom security group or not" +} +variable "custom_sg_allow_all_egress" { + type = bool + default = true + description = "Whether to allow all egress traffic or not" +} + +variable "custom_sg_rules" { + type = list(object({ + key = string + type = string + from_port = number + to_port = number + protocol = string + cidr_blocks = list(string) + description = string + })) + default = [] + description = "Custom security group rules" + +} diff --git a/modules/redshift-serverless/versions.tf b/modules/redshift-serverless/versions.tf new file mode 100644 index 000000000..5b9bb0612 --- /dev/null +++ b/modules/redshift-serverless/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + random = { + source = "hashicorp/random" + version = ">= 3.0" + } + } +} diff --git a/modules/redshift/CHANGELOG.md b/modules/redshift/CHANGELOG.md new file mode 100644 index 000000000..055b6d2c9 --- /dev/null +++ b/modules/redshift/CHANGELOG.md @@ -0,0 +1,7 @@ +## Components PR [Fix components](https://github.com/cloudposse/terraform-aws-components/pull/855) + +This is a bug fix and feature enhancement update. No actions necessary to upgrade. + +## Fixes + +- Fix bug related to the AWS provider `>= 5.0.0` removed `redshift_cluster.cluster_security_groups`. diff --git a/modules/redshift/README.md b/modules/redshift/README.md new file mode 100644 index 000000000..7e5445b11 --- /dev/null +++ b/modules/redshift/README.md @@ -0,0 +1,157 @@ +--- +tags: + - component/redshift + - layer/data + - provider/aws +--- + +# Component: `redshift` + +This component is responsible for provisioning a RedShift instance. It seeds relevant database information (hostnames, +username, password, etc.) into AWS SSM Parameter Store. + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. 
+ +```yaml +components: + terraform: + redshift: + vars: + enabled: true + name: redshift + database_name: redshift + publicly_accessible: false + node_type: dc2.large + number_of_nodes: 1 + cluster_type: single-node + ssm_enabled: true + log_exports: + - userlog + - connectionlog + - useractivitylog + admin_user: redshift + custom_sg_enabled: true + custom_sg_rules: + - type: ingress + key: postgres + description: Allow inbound traffic to the redshift cluster + from_port: 5439 + to_port: 5439 + protocol: tcp + cidr_blocks: + - 10.0.0.0/8 +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | >= 4.17, <= 4.67.0 | +| [random](#requirement\_random) | >= 3.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.17, <= 4.67.0 | +| [random](#provider\_random) | >= 3.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [redshift\_cluster](#module\_redshift\_cluster) | cloudposse/redshift-cluster/aws | 1.0.0 | +| [redshift\_sg](#module\_redshift\_sg) | cloudposse/security-group/aws | 2.2.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_ssm_parameter.redshift_database_hostname](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource | +| [aws_ssm_parameter.redshift_database_name](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource | +| [aws_ssm_parameter.redshift_database_password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource | +| [aws_ssm_parameter.redshift_database_port](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource | +| [aws_ssm_parameter.redshift_database_user](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource | +| [random_password.admin_password](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/password) | resource | +| [random_pet.admin_user](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/pet) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [admin\_password](#input\_admin\_password) | Password for the master DB user. Required unless a snapshot\_identifier is provided | `string` | `null` | no | +| [admin\_user](#input\_admin\_user) | Username for the master DB user. Required unless a snapshot\_identifier is provided | `string` | `null` | no | +| [allow\_version\_upgrade](#input\_allow\_version\_upgrade) | Whether or not to enable major version upgrades which are applied during the maintenance window to the Amazon Redshift engine that is running on the cluster | `bool` | `false` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [cluster\_type](#input\_cluster\_type) | The cluster type to use. Either `single-node` or `multi-node` | `string` | `"single-node"` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [custom\_sg\_allow\_all\_egress](#input\_custom\_sg\_allow\_all\_egress) | Whether to allow all egress traffic or not | `bool` | `true` | no | +| [custom\_sg\_enabled](#input\_custom\_sg\_enabled) | Whether to use custom security group or not | `bool` | `false` | no | +| [custom\_sg\_rules](#input\_custom\_sg\_rules) | An array of custom security groups to create and assign to the cluster. |
list(object({
key = string
type = string
from_port = number
to_port = number
protocol = string
cidr_blocks = list(string)
description = string
}))
| `[]` | no | +| [database\_name](#input\_database\_name) | The name of the first database to be created when the cluster is created | `string` | `null` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [engine\_version](#input\_engine\_version) | The version of the Amazon Redshift engine to use. See https://docs.aws.amazon.com/redshift/latest/mgmt/cluster-versions.html | `string` | `"1.0"` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [kms\_alias\_name\_ssm](#input\_kms\_alias\_name\_ssm) | KMS alias name for SSM | `string` | `"alias/aws/ssm"` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [node\_type](#input\_node\_type) | The node type to be provisioned for the cluster. See https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#working-with-clusters-overview | `string` | `"dc2.large"` | no | +| [number\_of\_nodes](#input\_number\_of\_nodes) | The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node | `number` | `1` | no | +| [port](#input\_port) | The port number on which the cluster accepts incoming connections | `number` | `5439` | no | +| [publicly\_accessible](#input\_publicly\_accessible) | If true, the cluster can be accessed from a public network | `bool` | `false` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no |
+| [region](#input\_region) | AWS region | `string` | n/a | yes |
+| [security\_group\_ids](#input\_security\_group\_ids) | An array of security group IDs to associate with the endpoint. | `list(string)` | `null` | no |
+| [ssm\_enabled](#input\_ssm\_enabled) | If `true` create SSM keys for the database user and password. | `bool` | `false` | no |
+| [ssm\_key\_format](#input\_ssm\_key\_format) | SSM path format. The values will be used in the following order: `var.ssm_key_prefix`, `var.name`, `var.ssm_key_*` | `string` | `"/%v/%v/%v"` | no |
+| [ssm\_key\_hostname](#input\_ssm\_key\_hostname) | The SSM key to save the hostname. See `var.ssm_key_format`. | `string` | `"admin/db_hostname"` | no |
+| [ssm\_key\_password](#input\_ssm\_key\_password) | The SSM key to save the password. See `var.ssm_key_format`. | `string` | `"admin/db_password"` | no |
+| [ssm\_key\_port](#input\_ssm\_key\_port) | The SSM key to save the port. See `var.ssm_key_format`. | `string` | `"admin/db_port"` | no |
+| [ssm\_key\_prefix](#input\_ssm\_key\_prefix) | SSM path prefix. Omit the leading forward slash `/`. | `string` | `"redshift"` | no |
+| [ssm\_key\_user](#input\_ssm\_key\_user) | The SSM key to save the user. See `var.ssm_key_format`. | `string` | `"admin/db_user"` | no |
+| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no |
+| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [use\_private\_subnets](#input\_use\_private\_subnets) | Whether to use private or public subnets for the Redshift cluster | `bool` | `true` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [arn](#output\_arn) | Amazon Resource Name (ARN) of cluster | +| [cluster\_identifier](#output\_cluster\_identifier) | The Cluster Identifier | +| [cluster\_security\_groups](#output\_cluster\_security\_groups) | The security groups associated with the cluster | +| [database\_name](#output\_database\_name) | The name of the default database in the Cluster | +| [dns\_name](#output\_dns\_name) | The DNS name of the cluster | +| [endpoint](#output\_endpoint) | The connection endpoint | +| [id](#output\_id) | The Redshift Cluster ID | +| [port](#output\_port) | The Port the cluster responds on | +| [redshift\_database\_ssm\_key\_prefix](#output\_redshift\_database\_ssm\_key\_prefix) | SSM prefix | +| [vpc\_security\_group\_ids](#output\_vpc\_security\_group\_ids) | The VPC security group IDs associated with the cluster | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/redshift) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/redshift/context.tf b/modules/redshift/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/redshift/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/redshift/main.tf b/modules/redshift/main.tf new file mode 100644 index 000000000..7c9a4fcba --- /dev/null +++ b/modules/redshift/main.tf @@ -0,0 +1,74 @@ +locals { + enabled = module.this.enabled + subnet_ids = var.use_private_subnets ? module.vpc.outputs.private_subnet_ids : module.vpc.outputs.public_subnet_ids + admin_user = var.admin_user != null && var.admin_user != "" ? var.admin_user : join("", random_pet.admin_user.*.id) + admin_password = var.admin_password != null && var.admin_password != "" ? var.admin_password : join("", random_password.admin_password.*.result) + database_name = var.database_name == null ? module.this.id : var.database_name +} + +resource "random_pet" "admin_user" { + count = local.enabled && (var.admin_user == null || var.admin_user == "") ? 1 : 0 + + length = 2 + separator = "_" + + keepers = { + db_name = var.database_name + } +} + +resource "random_password" "admin_password" { + count = local.enabled && (var.admin_password == null || var.admin_password == "") ? 1 : 0 + + length = 33 + # Leave special characters out to avoid quoting and other issues. + # Special characters have no additional security compared to increasing length. + special = false + override_special = "!#$%^&*()<>-_" + + keepers = { + db_name = var.database_name + } +} + +module "redshift_cluster" { + source = "cloudposse/redshift-cluster/aws" + version = "1.0.0" + + subnet_ids = local.subnet_ids + vpc_security_group_ids = coalesce(var.security_group_ids, module.redshift_sg[*].id, []) + + port = var.port + admin_user = local.admin_user + admin_password = local.admin_password + database_name = local.database_name + node_type = var.node_type + number_of_nodes = var.number_of_nodes + cluster_type = var.cluster_type + engine_version = var.engine_version + publicly_accessible = var.publicly_accessible + allow_version_upgrade = var.allow_version_upgrade + + context = module.this.context +} + +module "redshift_sg" { + count = local.enabled && var.custom_sg_enabled ? 
1 : 0 + + source = "cloudposse/security-group/aws" + version = "2.2.0" + + create_before_destroy = true + preserve_security_group_id = true + + attributes = ["redshift"] + + # Allow unlimited egress + allow_all_egress = var.custom_sg_allow_all_egress + + rules = var.custom_sg_rules + + vpc_id = module.vpc.outputs.vpc_id + + context = module.this.context +} diff --git a/modules/redshift/outputs.tf b/modules/redshift/outputs.tf new file mode 100644 index 000000000..185282e9b --- /dev/null +++ b/modules/redshift/outputs.tf @@ -0,0 +1,44 @@ +output "id" { + description = "The Redshift Cluster ID" + value = local.enabled ? module.redshift_cluster.id : null +} + +output "arn" { + description = "Amazon Resource Name (ARN) of cluster" + value = local.enabled ? module.redshift_cluster.arn : null +} + +output "cluster_identifier" { + description = "The Cluster Identifier" + value = local.enabled ? module.redshift_cluster.cluster_identifier : null +} + +output "port" { + description = "The Port the cluster responds on" + value = local.enabled ? module.redshift_cluster.port : null +} + +output "dns_name" { + description = "The DNS name of the cluster" + value = local.enabled ? module.redshift_cluster.dns_name : null +} + +output "vpc_security_group_ids" { + description = "The VPC security group IDs associated with the cluster" + value = local.enabled ? module.redshift_cluster.vpc_security_group_ids : null +} + +output "cluster_security_groups" { + description = "The security groups associated with the cluster" + value = local.enabled ? module.redshift_cluster.cluster_security_groups : null +} + +output "endpoint" { + description = "The connection endpoint" + value = local.enabled ? module.redshift_cluster.endpoint : null +} + +output "database_name" { + description = "The name of the default database in the Cluster" + value = local.enabled ? module.redshift_cluster.database_name : null +} diff --git a/modules/redshift/providers.tf b/modules/redshift/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/redshift/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/redshift/remote-state.tf b/modules/redshift/remote-state.tf new file mode 100644 index 000000000..757ef9067 --- /dev/null +++ b/modules/redshift/remote-state.tf @@ -0,0 +1,8 @@ +module "vpc" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "vpc" + + context = module.this.context +} diff --git a/modules/redshift/systems-manager.tf b/modules/redshift/systems-manager.tf new file mode 100644 index 000000000..ce2b541a6 --- /dev/null +++ b/modules/redshift/systems-manager.tf @@ -0,0 +1,108 @@ +# AWS KMS alias used for encryption/decryption of SSM secure strings +variable "kms_alias_name_ssm" { + type = string + default = "alias/aws/ssm" + description = "KMS alias name for SSM" +} + +variable "ssm_enabled" { + type = bool + default = false + description = "If `true` create SSM keys for the database user and password." 
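+  # When enabled, the resources below write the following parameters under
+  # format(var.ssm_key_format, var.ssm_key_prefix, var.name, <key>):
+  #   admin/db_user, admin/db_hostname and admin/db_port as String values,
+  #   admin/db_password as a SecureString encrypted with var.kms_alias_name_ssm,
+  #   and the database name.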
+}
+
+variable "ssm_key_format" {
+  type        = string
+  default     = "/%v/%v/%v"
+  description = "SSM path format. The values will be used in the following order: `var.ssm_key_prefix`, `var.name`, `var.ssm_key_*`"
+}
+
+variable "ssm_key_prefix" {
+  type        = string
+  default     = "redshift"
+  description = "SSM path prefix. Omit the leading forward slash `/`."
+}
+
+variable "ssm_key_user" {
+  type        = string
+  default     = "admin/db_user"
+  description = "The SSM key to save the user. See `var.ssm_key_format`."
+}
+
+variable "ssm_key_password" {
+  type        = string
+  default     = "admin/db_password"
+  description = "The SSM key to save the password. See `var.ssm_key_format`."
+}
+
+variable "ssm_key_hostname" {
+  type        = string
+  default     = "admin/db_hostname"
+  description = "The SSM key to save the hostname. See `var.ssm_key_format`."
+}
+
+variable "ssm_key_port" {
+  type        = string
+  default     = "admin/db_port"
+  description = "The SSM key to save the port. See `var.ssm_key_format`."
+}
+
+locals {
+  ssm_enabled = local.enabled && var.ssm_enabled
+}
+
+resource "aws_ssm_parameter" "redshift_database_name" {
+  count = local.ssm_enabled ? 1 : 0
+
+  name        = format(var.ssm_key_format, var.ssm_key_prefix, var.name, "admin/db_name") # dedicated key; avoids colliding with the port parameter
+  value       = local.database_name
+  description = "Redshift DB name"
+  type        = "String"
+  overwrite   = true
+}
+
+resource "aws_ssm_parameter" "redshift_database_user" {
+  count = local.ssm_enabled ? 1 : 0
+
+  name        = format(var.ssm_key_format, var.ssm_key_prefix, var.name, var.ssm_key_user)
+  value       = local.admin_user
+  description = "Redshift DB user"
+  type        = "String"
+  overwrite   = true
+}
+
+resource "aws_ssm_parameter" "redshift_database_password" {
+  count = local.ssm_enabled ? 1 : 0
+
+  name        = format(var.ssm_key_format, var.ssm_key_prefix, var.name, var.ssm_key_password)
+  value       = local.admin_password
+  description = "Redshift DB password"
+  type        = "SecureString"
+  key_id      = var.kms_alias_name_ssm
+  overwrite   = true
+}
+
+resource "aws_ssm_parameter" "redshift_database_hostname" {
+  count = local.ssm_enabled ? 1 : 0
+
+  name        = format(var.ssm_key_format, var.ssm_key_prefix, var.name, var.ssm_key_hostname)
+  value       = module.redshift_cluster.endpoint
+  description = "Redshift DB hostname"
+  type        = "String"
+  overwrite   = true
+}
+
+resource "aws_ssm_parameter" "redshift_database_port" {
+  count = local.ssm_enabled ? 1 : 0
+
+  name        = format(var.ssm_key_format, var.ssm_key_prefix, var.name, var.ssm_key_port)
+  value       = var.port
+  description = "Redshift DB port"
+  type        = "String"
+  overwrite   = true
+}
+
+output "redshift_database_ssm_key_prefix" {
+  value       = local.ssm_enabled ? format(var.ssm_key_format, var.ssm_key_prefix, var.name, "") : null
+  description = "SSM prefix"
+}
diff --git a/modules/redshift/variables.tf b/modules/redshift/variables.tf
new file mode 100644
index 000000000..949886068
--- /dev/null
+++ b/modules/redshift/variables.tf
@@ -0,0 +1,102 @@
+variable "region" {
+  type        = string
+  description = "AWS region"
+}
+
+variable "port" {
+  type        = number
+  default     = 5439
+  description = "The port number on which the cluster accepts incoming connections"
+}
+
+variable "admin_user" {
+  type        = string
+  default     = null
+  description = "Username for the master DB user. Required unless a snapshot_identifier is provided"
+}
+
+variable "admin_password" {
+  type        = string
+  default     = null
+  description = "Password for the master DB user. 
Required unless a snapshot_identifier is provided" +} + +variable "database_name" { + type = string + default = null + description = "The name of the first database to be created when the cluster is created" +} + +variable "node_type" { + type = string + default = "dc2.large" + description = "The node type to be provisioned for the cluster. See https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#working-with-clusters-overview" +} + +variable "number_of_nodes" { + type = number + default = 1 + description = "The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node" +} + +variable "cluster_type" { + type = string + default = "single-node" + description = "The cluster type to use. Either `single-node` or `multi-node`" +} + +variable "engine_version" { + type = string + default = "1.0" + description = "The version of the Amazon Redshift engine to use. See https://docs.aws.amazon.com/redshift/latest/mgmt/cluster-versions.html" +} + +variable "publicly_accessible" { + type = bool + default = false + description = "If true, the cluster can be accessed from a public network" +} + +variable "allow_version_upgrade" { + type = bool + default = false + description = "Whether or not to enable major version upgrades which are applied during the maintenance window to the Amazon Redshift engine that is running on the cluster" +} + +variable "use_private_subnets" { + type = bool + default = true + description = "Whether to use private or public subnets for the Redshift cluster" +} + +variable "security_group_ids" { + type = list(string) + default = null + description = "An array of security group IDs to associate with the endpoint." +} + +variable "custom_sg_enabled" { + type = bool + default = false + description = "Whether to use custom security group or not" +} + +variable "custom_sg_allow_all_egress" { + type = bool + default = true + description = "Whether to allow all egress traffic or not" +} + +variable "custom_sg_rules" { + type = list(object({ + key = string + type = string + from_port = number + to_port = number + protocol = string + cidr_blocks = list(string) + description = string + })) + default = [] + description = "An array of custom security groups to create and assign to the cluster." +} diff --git a/modules/redshift/versions.tf b/modules/redshift/versions.tf new file mode 100644 index 000000000..4eeaaf38e --- /dev/null +++ b/modules/redshift/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.17, <= 4.67.0" + } + random = { + source = "hashicorp/random" + version = ">= 3.0" + } + } +} diff --git a/modules/route53-resolver-dns-firewall/README.md b/modules/route53-resolver-dns-firewall/README.md new file mode 100644 index 000000000..8b8ba8a58 --- /dev/null +++ b/modules/route53-resolver-dns-firewall/README.md @@ -0,0 +1,173 @@ +--- +tags: + - component/route53-resolver-dns-firewall + - layer/unassigned + - provider/aws +--- + +# Component: `route53-resolver-dns-firewall` + +This component is responsible for provisioning +[Route 53 Resolver DNS Firewall](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resolver-dns-firewall.html) +resources, including Route 53 Resolver DNS Firewall, domain lists, firewall rule groups, firewall rules, and logging +configuration. + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. 
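+
+The domain lists referenced by `domains_file` in the configuration below are plain-text files kept in the component's
+`config/` folder, with one fully-qualified domain per line (written with a trailing dot, as in the sample files added
+here). For example:
+
+```shell
+cat components/terraform/route53-resolver-dns-firewall/config/allowed_domains.txt
+# example.com.
+```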
+ +```yaml +# stacks/catalog/route53-resolver-dns-firewall/defaults.yaml +components: + terraform: + route53-resolver-dns-firewall/defaults: + metadata: + type: abstract + settings: + spacelift: + workspace_enabled: true + vars: + enabled: true + firewall_fail_open: "ENABLED" + query_log_enabled: true + logs_bucket_component_name: "route53-resolver-dns-firewall-logs-bucket" + domains_config: + allowed-domains: + # Concat the lists of domains passed in the `domains` field and loaded from the file `domains_file` + # The file is in the `components/terraform/route53-resolver-dns-firewall/config` folder + domains_file: "config/allowed_domains.txt" + domains: [] + blocked-domains: + # Concat the lists of domains passed in the `domains` field and loaded from the file `domains_file` + # The file is in the `components/terraform/route53-resolver-dns-firewall/config` folder + domains_file: "config/blocked_domains.txt" + domains: [] + rule_groups_config: + blocked-and-allowed-domains: + # 'priority' must be between 100 and 9900 exclusive + priority: 101 + rules: + allowed-domains: + firewall_domain_list_name: "allowed-domains" + # 'priority' must be between 100 and 9900 exclusive + priority: 101 + action: "ALLOW" + blocked-domains: + firewall_domain_list_name: "blocked-domains" + # 'priority' must be between 100 and 9900 exclusive + priority: 200 + action: "BLOCK" + block_response: "NXDOMAIN" +``` + +```yaml +# stacks/mixins/stage/dev.yaml +import: + - catalog/route53-resolver-dns-firewall/defaults + +components: + terraform: + route53-resolver-dns-firewall/example: + metadata: + component: route53-resolver-dns-firewall + inherits: + - route53-resolver-dns-firewall/defaults + vars: + name: route53-dns-firewall-example + vpc_component_name: vpc +``` + +Execute the following command to provision the `route53-resolver-dns-firewall/example` component using Atmos: + +```shell +atmos terraform apply route53-resolver-dns-firewall/example -s +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [logs\_bucket](#module\_logs\_bucket) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [route53\_resolver\_dns\_firewall](#module\_route53\_resolver\_dns\_firewall) | cloudposse/route53-resolver-dns-firewall/aws | 0.2.1 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [domains\_config](#input\_domains\_config) | Map of Route 53 Resolver DNS Firewall domain configurations |
map(object({
domains = optional(list(string))
domains_file = optional(string)
}))
| n/a | yes | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [firewall\_fail\_open](#input\_firewall\_fail\_open) | Determines how Route 53 Resolver handles queries during failures, for example when all traffic that is sent to DNS Firewall fails to receive a reply.
By default, fail open is disabled, which means the failure mode is closed.
This approach favors security over availability. DNS Firewall blocks queries that it is unable to evaluate properly.
If you enable this option, the failure mode is open. This approach favors availability over security.
In this case, DNS Firewall allows queries to proceed if it is unable to properly evaluate them.
Valid values: ENABLED, DISABLED. | `string` | `"ENABLED"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [logs\_bucket\_component\_name](#input\_logs\_bucket\_component\_name) | Flow logs bucket component name | `string` | `null` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [query\_log\_config\_name](#input\_query\_log\_config\_name) | Route 53 Resolver query log config name. If omitted, the name will be generated by concatenating the ID from the context with the VPC ID | `string` | `null` | no | +| [query\_log\_enabled](#input\_query\_log\_enabled) | Flag to enable/disable Route 53 Resolver query logging | `bool` | `false` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [rule\_groups\_config](#input\_rule\_groups\_config) | Rule groups and rules configuration |
map(object({
priority = number
mutation_protection = optional(string)
# https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_resolver_firewall_rule
rules = map(object({
action = string
priority = number
block_override_dns_type = optional(string)
block_override_domain = optional(string)
block_override_ttl = optional(number)
block_response = optional(string)
firewall_domain_list_name = string
}))
}))
| n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [vpc\_component\_name](#input\_vpc\_component\_name) | The name of a VPC component where the Network Firewall is provisioned | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [domains](#output\_domains) | Route 53 Resolver DNS Firewall domain configurations | +| [query\_log\_config](#output\_query\_log\_config) | Route 53 Resolver query logging configuration | +| [rule\_group\_associations](#output\_rule\_group\_associations) | Route 53 Resolver DNS Firewall rule group associations | +| [rule\_groups](#output\_rule\_groups) | Route 53 Resolver DNS Firewall rule groups | +| [rules](#output\_rules) | Route 53 Resolver DNS Firewall rules | + + + +## References + +- [Deploy centralized traffic filtering using AWS Network Firewall](https://aws.amazon.com/blogs/networking-and-content-delivery/deploy-centralized-traffic-filtering-using-aws-network-firewall) +- [AWS Network Firewall – New Managed Firewall Service in VPC](https://aws.amazon.com/blogs/aws/aws-network-firewall-new-managed-firewall-service-in-vpc) +- [Deployment models for AWS Network Firewall](https://aws.amazon.com/blogs/networking-and-content-delivery/deployment-models-for-aws-network-firewall) +- [Deployment models for AWS Network Firewall with VPC routing enhancements](https://aws.amazon.com/blogs/networking-and-content-delivery/deployment-models-for-aws-network-firewall-with-vpc-routing-enhancements) +- [Inspection Deployment Models with AWS Network Firewall](https://d1.awsstatic.com/architecture-diagrams/ArchitectureDiagrams/inspection-deployment-models-with-AWS-network-firewall-ra.pdf) +- [How to deploy AWS Network Firewall by using AWS Firewall Manager](https://aws.amazon.com/blogs/security/how-to-deploy-aws-network-firewall-by-using-aws-firewall-manager) +- [A Deep Dive into AWS Transit Gateway](https://www.youtube.com/watch?v=a55Iud-66q0) +- [Appliance in a shared services VPC](https://docs.aws.amazon.com/vpc/latest/tgw/transit-gateway-appliance-scenario.html) +- [Quotas on Route 53 Resolver DNS Firewall](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html#limits-api-entities-resolver) +- [Unified bad hosts](https://github.com/StevenBlack/hosts) +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/TODO) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/route53-resolver-dns-firewall/config/allowed_domains.txt b/modules/route53-resolver-dns-firewall/config/allowed_domains.txt new file mode 100644 index 000000000..d9ca0d0a9 --- /dev/null +++ b/modules/route53-resolver-dns-firewall/config/allowed_domains.txt @@ -0,0 +1 @@ +example.com. diff --git a/modules/route53-resolver-dns-firewall/config/blocked_domains.txt b/modules/route53-resolver-dns-firewall/config/blocked_domains.txt new file mode 100644 index 000000000..b30b0bad9 --- /dev/null +++ b/modules/route53-resolver-dns-firewall/config/blocked_domains.txt @@ -0,0 +1,48 @@ +accesscu.ca. +alterna.ca. +battlerivercreditunion.com. +bayviewnb.com. +belgianalliancecu.mb.ca. +blueshorefinancial.com. +caissepopclare.com. +caseracu.ca. +cccu.ca. +cdcu.com. +chinookfinancial.com. +comsavings.com. +comtechfirecu.com. +conexus.ca. 
+copperfin.ca. +diamondnorthcu.com. +eaglerivercu.com. +eastcoastcu.ca. +eccu.ca. +ekccu.com. +encompasscu.ca. +entegra.ca. +envisionfinancial.ca. +firstcu.ca. +firstontariocu.com. +fnbc.ca. +frontlinecu.com. +implicity.ca. +innovationcu.ca. +lakelandcreditunion.com. +lambtonfinancial.ca. +lecu.ca. +local183cu.ca. +mtlehman.com. +newrosscreditunion.ca. +nivervillecu.mb.ca. +northsave.com. +noventis.ca. +npscu.ca. +peacehills.com. +prospera.ca. +pscu.ca. +rpcul.com. +sdcu.com. +sprucecu.bc.ca. +stridecu.ca. +sudburycu.com. +synergycu.ca. diff --git a/modules/route53-resolver-dns-firewall/context.tf b/modules/route53-resolver-dns-firewall/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/route53-resolver-dns-firewall/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. 
+ Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. 
+ This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/route53-resolver-dns-firewall/main.tf b/modules/route53-resolver-dns-firewall/main.tf new file mode 100644 index 000000000..429a71b50 --- /dev/null +++ b/modules/route53-resolver-dns-firewall/main.tf @@ -0,0 +1,25 @@ +locals { + enabled = module.this.enabled + query_log_enabled = local.enabled && var.query_log_enabled + + vpc_outputs = module.vpc.outputs + vpc_id = local.vpc_outputs.vpc_id + + logs_bucket_outputs = module.logs_bucket.outputs + logs_bucket_arn = local.logs_bucket_outputs.bucket_arn +} + +module "route53_resolver_dns_firewall" { + source = "cloudposse/route53-resolver-dns-firewall/aws" + version = "0.2.1" + + vpc_id = local.vpc_id + query_log_destination_arn = local.logs_bucket_arn + query_log_enabled = local.query_log_enabled + firewall_fail_open = var.firewall_fail_open + query_log_config_name = var.query_log_config_name + domains_config = var.domains_config + rule_groups_config = var.rule_groups_config + + context = module.this.context +} diff --git a/modules/route53-resolver-dns-firewall/outputs.tf b/modules/route53-resolver-dns-firewall/outputs.tf new file mode 100644 index 000000000..7190dcdbb --- /dev/null +++ b/modules/route53-resolver-dns-firewall/outputs.tf @@ -0,0 +1,24 @@ +output "query_log_config" { + value = module.route53_resolver_dns_firewall.query_log_config + description = "Route 53 Resolver query logging configuration" +} + +output "domains" { + value = module.route53_resolver_dns_firewall.domains + description = "Route 53 Resolver DNS Firewall domain configurations" +} + +output "rule_groups" { + value = module.route53_resolver_dns_firewall.rule_groups + description = "Route 53 Resolver DNS Firewall rule groups" +} + +output "rule_group_associations" { + value = module.route53_resolver_dns_firewall.rule_group_associations + description = "Route 53 Resolver DNS Firewall rule group associations" +} + +output "rules" { + value = module.route53_resolver_dns_firewall.rules + description = "Route 53 Resolver DNS Firewall rules" +} diff --git a/modules/route53-resolver-dns-firewall/providers.tf b/modules/route53-resolver-dns-firewall/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/route53-resolver-dns-firewall/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
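+    # compact() returns an empty list when the role ARN is null or empty, so the assume_role block is only rendered when a role ARN is configured.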
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/route53-resolver-dns-firewall/remote-state.tf b/modules/route53-resolver-dns-firewall/remote-state.tf new file mode 100644 index 000000000..e80e04f1e --- /dev/null +++ b/modules/route53-resolver-dns-firewall/remote-state.tf @@ -0,0 +1,25 @@ +module "vpc" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.vpc_component_name + + context = module.this.context +} + +module "logs_bucket" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.logs_bucket_component_name + + bypass = !local.query_log_enabled || var.logs_bucket_component_name == null || var.logs_bucket_component_name == "" + ignore_errors = !local.query_log_enabled || var.logs_bucket_component_name == null || var.logs_bucket_component_name == "" + + defaults = { + bucket_id = "" + bucket_arn = "" + } + + context = module.this.context +} diff --git a/modules/route53-resolver-dns-firewall/variables.tf b/modules/route53-resolver-dns-firewall/variables.tf new file mode 100644 index 000000000..c8d905387 --- /dev/null +++ b/modules/route53-resolver-dns-firewall/variables.tf @@ -0,0 +1,68 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "vpc_component_name" { + type = string + description = "The name of a VPC component where the Network Firewall is provisioned" +} + +variable "logs_bucket_component_name" { + type = string + description = "Flow logs bucket component name" + default = null +} + +variable "firewall_fail_open" { + type = string + description = <<-EOF + Determines how Route 53 Resolver handles queries during failures, for example when all traffic that is sent to DNS Firewall fails to receive a reply. + By default, fail open is disabled, which means the failure mode is closed. + This approach favors security over availability. DNS Firewall blocks queries that it is unable to evaluate properly. + If you enable this option, the failure mode is open. This approach favors availability over security. + In this case, DNS Firewall allows queries to proceed if it is unable to properly evaluate them. + Valid values: ENABLED, DISABLED. + EOF + default = "ENABLED" +} + +variable "query_log_enabled" { + type = bool + description = "Flag to enable/disable Route 53 Resolver query logging" + default = false +} + +variable "query_log_config_name" { + type = string + description = "Route 53 Resolver query log config name. 
If omitted, the name will be generated by concatenating the ID from the context with the VPC ID" + default = null +} + +# https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_resolver_firewall_domain_list +variable "domains_config" { + type = map(object({ + domains = optional(list(string)) + domains_file = optional(string) + })) + description = "Map of Route 53 Resolver DNS Firewall domain configurations" +} + +# https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_resolver_firewall_rule_group +variable "rule_groups_config" { + type = map(object({ + priority = number + mutation_protection = optional(string) + # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_resolver_firewall_rule + rules = map(object({ + action = string + priority = number + block_override_dns_type = optional(string) + block_override_domain = optional(string) + block_override_ttl = optional(number) + block_response = optional(string) + firewall_domain_list_name = string + })) + })) + description = "Rule groups and rules configuration" +} diff --git a/modules/route53-resolver-dns-firewall/versions.tf b/modules/route53-resolver-dns-firewall/versions.tf new file mode 100644 index 000000000..b5920b7b1 --- /dev/null +++ b/modules/route53-resolver-dns-firewall/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.3.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.9.0" + } + } +} diff --git a/modules/s3-bucket/README.md b/modules/s3-bucket/README.md index a684ed2ec..218d3b4a2 100644 --- a/modules/s3-bucket/README.md +++ b/modules/s3-bucket/README.md @@ -1,3 +1,11 @@ +--- +tags: + - component/s3-bucket + - layer/addons + - layer/gitops + - provider/aws +--- + # Component: `s3-bucket` This component is responsible for provisioning S3 buckets. 
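Further down, this diff wires a new `s3_object_ownership` input through to the `cloudposse/s3-bucket/aws` module. A minimal, hypothetical stack snippet showing how that input might be set (the instance name `s3-bucket/example` and all values are illustrative assumptions, not taken from this repository):

```yaml
components:
  terraform:
    s3-bucket/example:
      metadata:
        component: s3-bucket
      vars:
        enabled: true
        name: example
        # Assumed to accept the S3 Object Ownership values:
        # BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter
        s3_object_ownership: BucketOwnerEnforced
```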
@@ -51,12 +59,11 @@ components: days: 90 expiration: days: 120 - ``` ```yaml import: -- catalog/s3/defaults + - catalog/s3/defaults components: terraform: @@ -74,29 +81,31 @@ components: prefix: logs/ ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [template](#requirement\_template) | >= 2.2.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | -| [template](#provider\_template) | n/a | +| [aws](#provider\_aws) | >= 4.0 | +| [template](#provider\_template) | >= 2.2.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [bucket\_policy](#module\_bucket\_policy) | cloudposse/iam-policy/aws | 0.4.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [s3\_bucket](#module\_s3\_bucket) | cloudposse/s3-bucket/aws | 3.0.0 | +| [s3\_bucket](#module\_s3\_bucket) | cloudposse/s3-bucket/aws | 3.1.1 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources @@ -105,7 +114,7 @@ components: |------|------| | [aws_iam_policy_document.custom_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | -| [template_file.bucket_policy](https://registry.terraform.io/providers/hashicorp/template/latest/docs/data-sources/file) | data source | +| [template_file.bucket_policy](https://registry.terraform.io/providers/cloudposse/template/latest/docs/data-sources/file) | data source | ## Inputs @@ -123,7 +132,7 @@ components: | [block\_public\_acls](#input\_block\_public\_acls) | Set to `false` to disable the blocking of new public access lists on the bucket | `bool` | `true` | no | | [block\_public\_policy](#input\_block\_public\_policy) | Set to `false` to disable the blocking of new public policies on the bucket | `bool` | `true` | no | | [bucket\_key\_enabled](#input\_bucket\_key\_enabled) | Set this to true to use Amazon S3 Bucket Keys for SSE-KMS, which reduce the cost of AWS KMS requests.
For more information, see: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucket-key.html | `bool` | `false` | no | -| [bucket\_name](#input\_bucket\_name) | Bucket name. If provided, the bucket will be created with this name instead of generating the name from the context | `string` | `null` | no | +| [bucket\_name](#input\_bucket\_name) | Bucket name. If provided, the bucket will be created with this name instead of generating the name from the context | `string` | `""` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [cors\_configuration](#input\_cors\_configuration) | Specifies the allowed headers, methods, origins and exposed headers when using CORS on this bucket |
list(object({
allowed_headers = list(string)
allowed_methods = list(string)
allowed_origins = list(string)
expose_headers = list(string)
max_age_seconds = number
}))
| `null` | no | | [custom\_policy\_account\_names](#input\_custom\_policy\_account\_names) | List of accounts names to assign as principals for the s3 bucket custom policy | `list(string)` | `[]` | no | @@ -138,8 +147,6 @@ components: | [iam\_policy\_statements](#input\_iam\_policy\_statements) | Map of IAM policy statements to use in the bucket policy. | `any` | `{}` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | | [ignore\_public\_acls](#input\_ignore\_public\_acls) | Set to `false` to disable the ignoring of public access lists on the bucket | `bool` | `true` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kms\_master\_key\_arn](#input\_kms\_master\_key\_arn) | The AWS KMS master key ARN used for the `SSE-KMS` encryption. This can only be used when you set the value of `sse_algorithm` as `aws:kms`. The default aws/s3 AWS KMS master key is used if this element is absent while the `sse_algorithm` is `aws:kms` | `string` | `""` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | @@ -183,10 +190,11 @@ components: | [bucket\_region](#output\_bucket\_region) | Bucket region | | [bucket\_regional\_domain\_name](#output\_bucket\_regional\_domain\_name) | Bucket region-specific domain name | - + ## References - * [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/s3-bucket) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/s3-bucket) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/s3-bucket/main.tf b/modules/s3-bucket/main.tf index 2035e0b05..10a47f478 100644 --- a/modules/s3-bucket/main.tf +++ b/modules/s3-bucket/main.tf @@ -37,7 +37,7 @@ module "bucket_policy" { module "s3_bucket" { source = "cloudposse/s3-bucket/aws" - version = "3.0.0" + version = "3.1.1" bucket_name = var.bucket_name @@ -54,6 +54,7 @@ module "s3_bucket" { source_policy_documents = [local.bucket_policy] privileged_principal_actions = var.privileged_principal_actions privileged_principal_arns = var.privileged_principal_arns + s3_object_ownership = var.s3_object_ownership # Static website configuration cors_configuration = var.cors_configuration diff --git a/modules/s3-bucket/providers.tf b/modules/s3-bucket/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/s3-bucket/providers.tf +++ b/modules/s3-bucket/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/s3-bucket/remote-state.tf b/modules/s3-bucket/remote-state.tf index 89c2a7fc0..69f657564 100644 --- a/modules/s3-bucket/remote-state.tf +++ b/modules/s3-bucket/remote-state.tf @@ -1,6 +1,6 @@ module "account_map" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = "account-map" environment = var.account_map_environment_name diff --git a/modules/s3-bucket/variables.tf b/modules/s3-bucket/variables.tf index d1fee56c8..bbbb425e4 100644 --- a/modules/s3-bucket/variables.tf +++ b/modules/s3-bucket/variables.tf @@ -296,7 +296,7 @@ variable "s3_replication_source_roles" { variable "bucket_name" { type = string - default = null + default = "" description = "Bucket name. 
If provided, the bucket will be created with this name instead of generating the name from the context" } @@ -389,4 +389,3 @@ variable "iam_policy_statements" { description = "Map of IAM policy statements to use in the bucket policy." default = {} } - diff --git a/modules/s3-bucket/versions.tf b/modules/s3-bucket/versions.tf index e89eb16ed..da561d739 100644 --- a/modules/s3-bucket/versions.tf +++ b/modules/s3-bucket/versions.tf @@ -4,7 +4,11 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" + } + template = { + source = "cloudposse/template" + version = ">= 2.2.0" } } } diff --git a/modules/security-hub/README.md b/modules/security-hub/README.md new file mode 100644 index 000000000..cfea17b41 --- /dev/null +++ b/modules/security-hub/README.md @@ -0,0 +1,254 @@ +--- +tags: + - component/security-hub + - layer/security-and-compliance + - provider/aws +--- + +# Component: `security-hub` + +This component is responsible for configuring Security Hub within an AWS Organization. + +Amazon Security Hub enables users to centrally manage and monitor the security and compliance of their AWS accounts and +resources. It aggregates, organizes, and prioritizes security findings from various AWS services, third-party tools, and +integrated partner solutions. + +Here are the key features and capabilities of Amazon Security Hub: + +- Centralized security management: Security Hub provides a centralized dashboard where users can view and manage + security findings from multiple AWS accounts and regions. This allows for a unified view of the security posture + across the entire AWS environment. + +- Automated security checks: Security Hub automatically performs continuous security checks on AWS resources, + configurations, and security best practices. It leverages industry standards and compliance frameworks, such as AWS + CIS Foundations Benchmark, to identify potential security issues. + +- Integrated partner solutions: Security Hub integrates with a wide range of AWS native services, as well as third-party + security products and solutions. This integration enables the ingestion and analysis of security findings from diverse + sources, offering a comprehensive security view. + +- Security standards and compliance: Security Hub provides compliance checks against industry standards and regulatory + frameworks, such as PCI DSS, HIPAA, and GDPR. It identifies non-compliant resources and provides guidance on + remediation actions to ensure adherence to security best practices. + +- Prioritized security findings: Security Hub analyzes and prioritizes security findings based on severity, enabling + users to focus on the most critical issues. It assigns severity levels and generates a consolidated view of security + alerts, allowing for efficient threat response and remediation. + +- Custom insights and event aggregation: Security Hub supports custom insights, allowing users to create their own rules + and filters to focus on specific security criteria or requirements. It also provides event aggregation and correlation + capabilities to identify related security findings and potential attack patterns. + +- Integration with other AWS services: Security Hub seamlessly integrates with other AWS services, such as AWS + CloudTrail, Amazon GuardDuty, AWS Config, and AWS IAM Access Analyzer. This integration allows for enhanced + visibility, automated remediation, and streamlined security operations. 
+ +- Alert notifications and automation: Security Hub supports alert notifications through Amazon SNS, enabling users to + receive real-time notifications of security findings. It also facilitates automation and response through integration + with AWS Lambda, allowing for automated remediation actions. + +By utilizing Amazon Security Hub, organizations can improve their security posture, gain insights into security risks, +and effectively manage security compliance across their AWS accounts and resources. + +## Usage + +**Stack Level**: Regional + +## Deployment Overview + +This component is complex in that it must be deployed multiple times with different variables set to configure the AWS +Organization successfully. + +It is further complicated by the fact that you must deploy each of the component instances described below to every +region that existed before March 2019 and to any regions that have been opted-in as described in the +[AWS Documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-regions). + +In the examples below, we assume that the AWS Organization Management account is `root` and the AWS Organization +Delegated Administrator account is `security`, both in the `core` tenant. + +### Deploy to Delegated Administrator Account + +First, the component is deployed to the +[Delegated Administrator](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_organizations.html) account in each +region to configure the Security Hub instance to which each account will send its findings. + +```yaml +# core-ue1-security +components: + terraform: + security-hub/delegated-administrator/ue1: + metadata: + component: security-hub + vars: + enabled: true + delegated_administrator_account_name: core-security + environment: ue1 + region: us-east-1 +``` + +```bash +atmos terraform apply security-hub/delegated-administrator/ue1 -s core-ue1-security +atmos terraform apply security-hub/delegated-administrator/ue2 -s core-ue2-security +atmos terraform apply security-hub/delegated-administrator/uw1 -s core-uw1-security +# ... other regions +``` + +### Deploy to Organization Management (root) Account + +Next, the component is deployed to the AWS Organization Management (a/k/a `root`) Account in order to set the AWS +Organization Designated Administrator account. + +Note that `SuperAdmin` permissions must be used as we are deploying to the AWS Organization Management account. Since we +are using the `SuperAdmin` user, it will already have access to the state bucket, so we set the `role_arn` of the +backend config to null and set `var.privileged` to `true`. + +```yaml +# core-ue1-root +components: + terraform: + security-hub/root/ue1: + metadata: + component: security-hub + backend: + s3: + role_arn: null + vars: + enabled: true + delegated_administrator_account_name: core-security + environment: ue1 + region: us-east-1 + privileged: true +``` + +```bash +atmos terraform apply security-hub/root/ue1 -s core-ue1-root +atmos terraform apply security-hub/root/ue2 -s core-ue2-root +atmos terraform apply security-hub/root/uw1 -s core-uw1-root +# ... 
other regions +``` + +### Deploy Organization Settings in Delegated Administrator Account + +Finally, the component is deployed to the Delegated Administrator Account again in order to create the organization-wide +Security Hub configuration for the AWS Organization, but with `var.admin_delegated` set to `true` this time to indicate +that the delegation from the Organization Management account has already been performed. + +```yaml +# core-ue1-security +components: + terraform: + security-hub/org-settings/ue1: + metadata: + component: security-hub + vars: + enabled: true + delegated_administrator_account_name: core-security + environment: use1 + region: us-east-1 + admin_delegated: true +``` + +```bash +atmos terraform apply security-hub/org-settings/ue1 -s core-ue1-security +atmos terraform apply security-hub/org-settings/ue2 -s core-ue2-security +atmos terraform apply security-hub/org-settings/uw1 -s core-uw1-security +# ... other regions +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 5.0 | +| [awsutils](#requirement\_awsutils) | >= 0.16.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 5.0 | +| [awsutils](#provider\_awsutils) | >= 0.16.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [security\_hub](#module\_security\_hub) | cloudposse/security-hub/aws | 0.10.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_securityhub_account.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/securityhub_account) | resource | +| [aws_securityhub_organization_admin_account.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/securityhub_organization_admin_account) | resource | +| [aws_securityhub_organization_configuration.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/securityhub_organization_configuration) | resource | +| [awsutils_security_hub_organization_settings.this](https://registry.terraform.io/providers/cloudposse/awsutils/latest/docs/resources/security_hub_organization_settings) | resource | +| [aws_caller_identity.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | +| [aws_region.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [account\_map\_tenant](#input\_account\_map\_tenant) | The tenant where the `account_map` component required by remote-state is deployed | `string` | `"core"` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [admin\_delegated](#input\_admin\_delegated) | A flag to indicate if the AWS Organization-wide settings should be created. This can only be done after the Security
Hub Administrator account has already been delegated from the AWS Org Management account (usually 'root'). See the
Deployment section of the README for more information. | `bool` | `false` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [auto\_enable\_organization\_members](#input\_auto\_enable\_organization\_members) | Flag to toggle auto-enablement of Security Hub for new member accounts in the organization.

For more information, see:
https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/securityhub_organization_configuration#auto_enable | `bool` | `true` | no | +| [cloudwatch\_event\_rule\_pattern\_detail\_type](#input\_cloudwatch\_event\_rule\_pattern\_detail\_type) | The detail-type pattern used to match events that will be sent to SNS.

For more information, see:
https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/CloudWatchEventsandEventPatterns.html
https://docs.aws.amazon.com/eventbridge/latest/userguide/event-types.html | `string` | `"Security Hub Findings - Imported"` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [create\_sns\_topic](#input\_create\_sns\_topic) | Flag to indicate whether an SNS topic should be created for notifications. If you want to send findings to a new SNS
topic, set this to true and provide a valid configuration for subscribers. | `bool` | `false` | no | +| [default\_standards\_enabled](#input\_default\_standards\_enabled) | Flag to indicate whether default standards should be enabled | `bool` | `true` | no | +| [delegated\_administrator\_account\_name](#input\_delegated\_administrator\_account\_name) | The name of the account that is the AWS Organization Delegated Administrator account | `string` | `"core-security"` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [enabled\_standards](#input\_enabled\_standards) | A list of standards to enable in the account.

For example:
- standards/aws-foundational-security-best-practices/v/1.0.0
- ruleset/cis-aws-foundations-benchmark/v/1.2.0
- standards/pci-dss/v/3.2.1
- standards/cis-aws-foundations-benchmark/v/1.4.0 | `set(string)` | `[]` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [finding\_aggregation\_region](#input\_finding\_aggregation\_region) | If finding aggregation is enabled, the region that collects findings | `string` | `null` | no | +| [finding\_aggregator\_enabled](#input\_finding\_aggregator\_enabled) | Flag to indicate whether a finding aggregator should be created

If you want to aggregate findings from more than one region, set this to `true`.

For more information, see:
https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/securityhub_finding_aggregator | `bool` | `false` | no | +| [finding\_aggregator\_linking\_mode](#input\_finding\_aggregator\_linking\_mode) | Linking mode to use for the finding aggregator.

The possible values are:
- `ALL_REGIONS` - Aggregate from all regions
- `ALL_REGIONS_EXCEPT_SPECIFIED` - Aggregate from all regions except those specified in `var.finding_aggregator_regions`
- `SPECIFIED_REGIONS` - Aggregate from regions specified in `var.finding_aggregator_regions` | `string` | `"ALL_REGIONS"` | no | +| [finding\_aggregator\_regions](#input\_finding\_aggregator\_regions) | A list of regions to aggregate findings from.

This is only used if `finding_aggregator_enabled` is `true`. | `any` | `null` | no | +| [findings\_notification\_arn](#input\_findings\_notification\_arn) | The ARN for an SNS topic to send findings notifications to. This is only used if create\_sns\_topic is false.
If you want to send findings to an existing SNS topic, set this to the ARN of the existing topic and set
create\_sns\_topic to false. | `string` | `null` | no | +| [global\_environment](#input\_global\_environment) | Global environment name | `string` | `"gbl"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | +| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [organization\_management\_account\_name](#input\_organization\_management\_account\_name) | The name of the AWS Organization management account | `string` | `null` | no | +| [privileged](#input\_privileged) | true if the default provider already has access to the backend | `bool` | `false` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [root\_account\_stage](#input\_root\_account\_stage) | The stage name for the Organization root (management) account. This is used to lookup account IDs from account names
using the `account-map` component. | `string` | `"root"` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [subscribers](#input\_subscribers) | A map of subscription configurations for SNS topics

For more information, see:
https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sns_topic_subscription#argument-reference

protocol:
The protocol to use. The possible values for this are: sqs, sms, lambda, application. (http or https are partially
supported, see link) (email is an option but is unsupported in terraform, see link).
endpoint:
The endpoint to send data to, the contents will vary with the protocol. (see link for more information)
endpoint\_auto\_confirms:
Boolean indicating whether the end point is capable of auto confirming subscription e.g., PagerDuty. Default is
false.
raw\_message\_delivery:
Boolean indicating whether or not to enable raw message delivery (the original message is directly passed, not
wrapped in JSON with the original message in the message property). Default is false. |
map(object({
protocol = string
endpoint = string
endpoint_auto_confirms = bool
raw_message_delivery = bool
}))
| `{}` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [delegated\_administrator\_account\_id](#output\_delegated\_administrator\_account\_id) | The AWS Account ID of the AWS Organization delegated administrator account | +| [sns\_topic\_name](#output\_sns\_topic\_name) | The name of the SNS topic created by the component | +| [sns\_topic\_subscriptions](#output\_sns\_topic\_subscriptions) | The SNS topic subscriptions created by the component | + + + +## References + +- [AWS Security Hub Documentation](https://aws.amazon.com/security-hub/) +- [Cloud Posse's upstream component](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/security-hub) + +[](https://cpco.io/component) diff --git a/modules/security-hub/context.tf b/modules/security-hub/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/security-hub/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/security-hub/main.tf b/modules/security-hub/main.tf new file mode 100644 index 000000000..d3ae10b08 --- /dev/null +++ b/modules/security-hub/main.tf @@ -0,0 +1,77 @@ +locals { + enabled = module.this.enabled + account_map = module.account_map.outputs.full_account_map + + current_account_id = one(data.aws_caller_identity.this[*].account_id) + member_account_id_list = [for a in keys(local.account_map) : (local.account_map[a]) if local.account_map[a] != local.current_account_id] + org_delegated_administrator_account_id = local.account_map[var.delegated_administrator_account_name] + org_management_account_id = var.organization_management_account_name == null ? local.account_map[module.account_map.outputs.root_account_account_name] : local.account_map[var.organization_management_account_name] + is_org_delegated_administrator_account = local.current_account_id == local.org_delegated_administrator_account_id + is_org_management_account = local.current_account_id == local.org_management_account_id + is_finding_aggregation_region = local.enabled && var.finding_aggregator_enabled && var.finding_aggregation_region == data.aws_region.this[0].name + + create_sns_topic = local.enabled && var.create_sns_topic + create_securityhub = local.enabled && local.is_org_delegated_administrator_account && !var.admin_delegated + create_org_delegation = local.enabled && local.is_org_management_account + create_org_configuration = local.enabled && local.is_org_delegated_administrator_account && var.admin_delegated +} + +data "aws_caller_identity" "this" { + count = local.enabled ? 1 : 0 +} + +data "aws_region" "this" { + count = local.enabled ? 1 : 0 +} + +# If we are running in the AWS Org Management account, delegate Security Hub to the Delegated Administrator account +# (usually the security account). We also need to turn on Security Hub in the Management account so that it can +# aggregate findings and be managed by the Delegated Administrator account. 
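+# Note: enabling Security Hub in the Management account only covers that account; member accounts are
+# enrolled later from the Delegated Administrator account (see the deployment order in the README).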
+resource "aws_securityhub_organization_admin_account" "this" { + count = local.create_org_delegation ? 1 : 0 + + admin_account_id = local.org_delegated_administrator_account_id +} + +resource "aws_securityhub_account" "this" { + count = local.create_org_delegation ? 1 : 0 + + enable_default_standards = var.default_standards_enabled +} + +# If we are running in the AWS Org designated administrator account, enable Security Hub and optionally enable standards +# and finding aggregation +module "security_hub" { + count = local.create_securityhub ? 1 : 0 + source = "cloudposse/security-hub/aws" + version = "0.10.0" + + + cloudwatch_event_rule_pattern_detail_type = var.cloudwatch_event_rule_pattern_detail_type + create_sns_topic = local.create_sns_topic + enable_default_standards = var.default_standards_enabled + enabled_standards = var.enabled_standards + finding_aggregator_enabled = local.is_finding_aggregation_region + finding_aggregator_linking_mode = var.finding_aggregator_linking_mode + finding_aggregator_regions = var.finding_aggregator_regions + imported_findings_notification_arn = var.findings_notification_arn + subscribers = var.subscribers + + context = module.this.context +} + +# If we are running in the AWS Org designated administrator account with admin_delegated set to tru, set the AWS +# Organization-wide Security Hub configuration by configuring all other accounts to send their Security Hub findings to +# this account. +resource "awsutils_security_hub_organization_settings" "this" { + count = local.create_org_configuration ? 1 : 0 + + member_accounts = local.member_account_id_list +} + +resource "aws_securityhub_organization_configuration" "this" { + count = local.create_org_configuration ? 1 : 0 + + auto_enable = var.auto_enable_organization_members + auto_enable_standards = var.default_standards_enabled ? "DEFAULT" : "NONE" +} diff --git a/modules/security-hub/outputs.tf b/modules/security-hub/outputs.tf new file mode 100644 index 000000000..542055299 --- /dev/null +++ b/modules/security-hub/outputs.tf @@ -0,0 +1,14 @@ +output "delegated_administrator_account_id" { + value = local.org_delegated_administrator_account_id + description = "The AWS Account ID of the AWS Organization delegated administrator account" +} + +output "sns_topic_name" { + value = local.create_securityhub ? try(module.security_hub[0].sns_topic.name, null) : null + description = "The name of the SNS topic created by the component" +} + +output "sns_topic_subscriptions" { + value = local.create_securityhub ? try(module.security_hub[0].sns_topic_subscriptions, null) : null + description = "The SNS topic subscriptions created by the component" +} diff --git a/modules/security-hub/providers.tf b/modules/security-hub/providers.tf new file mode 100644 index 000000000..eed361d44 --- /dev/null +++ b/modules/security-hub/providers.tf @@ -0,0 +1,43 @@ + +provider "aws" { + region = var.region + + profile = !var.privileged && module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + dynamic "assume_role" { + for_each = var.privileged || module.iam_roles.profiles_enabled ? [] : ["role"] + content { + role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + } + } +} + +provider "awsutils" { + region = var.region + + profile = !var.privileged && module.iam_roles.profiles_enabled ? 
coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + dynamic "assume_role" { + for_each = var.privileged || module.iam_roles.profiles_enabled ? [] : ["role"] + content { + role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + privileged = var.privileged + + context = module.this.context +} + +variable "import_profile_name" { + type = string + default = null + description = "AWS Profile name to use when importing a resource" +} + +variable "import_role_arn" { + type = string + default = null + description = "IAM Role ARN to use when importing a resource" +} diff --git a/modules/security-hub/remote-state.tf b/modules/security-hub/remote-state.tf new file mode 100644 index 000000000..d9c31bca2 --- /dev/null +++ b/modules/security-hub/remote-state.tf @@ -0,0 +1,12 @@ +module "account_map" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "account-map" + tenant = var.account_map_tenant != "" ? var.account_map_tenant : module.this.tenant + stage = var.root_account_stage + environment = var.global_environment + privileged = var.privileged + + context = module.this.context +} diff --git a/modules/security-hub/variables.tf b/modules/security-hub/variables.tf new file mode 100644 index 000000000..d94639026 --- /dev/null +++ b/modules/security-hub/variables.tf @@ -0,0 +1,187 @@ +variable "account_map_tenant" { + type = string + default = "core" + description = "The tenant where the `account_map` component required by remote-state is deployed" +} + +variable "admin_delegated" { + type = bool + default = false + description = < ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | | [awsutils](#requirement\_awsutils) | >= 0.11.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | +| [aws](#provider\_aws) | >= 4.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [dns\_gbl\_delegated](#module\_dns\_gbl\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.3 | +| [dns\_gbl\_delegated](#module\_dns\_gbl\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [kms\_key\_ses](#module\_kms\_key\_ses) | cloudposse/kms-key/aws | 0.12.1 | | [ses](#module\_ses) | cloudposse/ses/aws | 0.22.3 | -| [ssm\_parameter\_store](#module\_ssm\_parameter\_store) | cloudposse/ssm-parameter-store/aws | 0.10.0 | +| [ssm\_parameter\_store](#module\_ssm\_parameter\_store) | cloudposse/ssm-parameter-store/aws | 0.11.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources @@ -73,8 +82,6 @@ components: | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -99,9 +106,11 @@ components: | [user\_name](#output\_user\_name) | Normalized name of the IAM user with permission to send emails from SES domain | | [user\_unique\_id](#output\_user\_unique\_id) | The unique ID of the IAM user with permission to send emails from SES domain | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/ses) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/ses) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/ses/default.auto.tfvars b/modules/ses/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/ses/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/ses/main.tf b/modules/ses/main.tf index c2b66836d..fb1aa4fa2 100644 --- a/modules/ses/main.tf +++ b/modules/ses/main.tf @@ -37,7 +37,7 @@ module "kms_key_ses" { module "ssm_parameter_store" { source = "cloudposse/ssm-parameter-store/aws" - version = "0.10.0" + version = "0.11.0" count = local.enabled ? 1 : 0 diff --git a/modules/ses/outputs.tf b/modules/ses/outputs.tf index d5a919005..1bf1c8598 100644 --- a/modules/ses/outputs.tf +++ b/modules/ses/outputs.tf @@ -22,4 +22,4 @@ output "user_unique_id" { output "user_arn" { value = module.ses.user_arn description = "The ARN the IAM user with permission to send emails from SES domain" -} \ No newline at end of file +} diff --git a/modules/ses/provider-awsutils.mixin.tf b/modules/ses/provider-awsutils.mixin.tf new file mode 100644 index 000000000..70fa8d095 --- /dev/null +++ b/modules/ses/provider-awsutils.mixin.tf @@ -0,0 +1,14 @@ +provider "awsutils" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} diff --git a/modules/ses/providers.tf b/modules/ses/providers.tf index aeed337fa..ef923e10a 100644 --- a/modules/ses/providers.tf +++ b/modules/ses/providers.tf @@ -1,27 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) - } - } -} - -provider "awsutils" { - # TODO: remove skip_region_validation until awsutils 0.11.1 can be downloaded from the registry - skip_region_validation = true - region = var.region - - profile = module.iam_roles.profiles_enabled ? 
coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null - - dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] - content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -30,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/ses/remote-state.tf b/modules/ses/remote-state.tf index 5fcc2261f..2e0d4da3c 100644 --- a/modules/ses/remote-state.tf +++ b/modules/ses/remote-state.tf @@ -1,6 +1,6 @@ module "dns_gbl_delegated" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.3" + version = "1.5.0" component = "dns-delegated" environment = "gbl" diff --git a/modules/ses/versions.tf b/modules/ses/versions.tf index 7c5de18a2..c32df7c10 100644 --- a/modules/ses/versions.tf +++ b/modules/ses/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } awsutils = { source = "cloudposse/awsutils" diff --git a/modules/sftp/README.md b/modules/sftp/README.md index 9d5989ba7..460ba8f2c 100644 --- a/modules/sftp/README.md +++ b/modules/sftp/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/sftp + - layer/unassigned + - provider/aws +--- + # Component: `sftp` This component is responsible for provisioning SFTP Endpoints. @@ -19,13 +26,14 @@ components: enabled: true ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | | [awsutils](#requirement\_awsutils) | >= 0.11.0 | | [local](#requirement\_local) | >= 2.0 | @@ -33,7 +41,7 @@ components: | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | +| [aws](#provider\_aws) | >= 4.0 | ## Modules @@ -44,7 +52,7 @@ components: | [security\_group](#module\_security\_group) | cloudposse/security-group/aws | 1.0.1 | | [sftp](#module\_sftp) | cloudposse/transfer-sftp/aws | 1.2.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -71,7 +79,6 @@ components: | [force\_destroy](#input\_force\_destroy) | Forces the AWS Transfer Server to be destroyed | `bool` | `false` | no | | [hosted\_zone\_suffix](#input\_hosted\_zone\_suffix) | The hosted zone name suffix. The stage name will be prefixed to this suffix. | `string` | n/a | yes | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -97,9 +104,11 @@ components: |------|-------------| | [sftp](#output\_sftp) | The SFTP module outputs | + ## References -* [cloudposse/terraform-aws-transfer-sftp](https://github.com/cloudposse/terraform-aws-transfer-sftp) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-transfer-sftp](https://github.com/cloudposse/terraform-aws-transfer-sftp) - Cloud Posse's + upstream component [](https://cpco.io/component) diff --git a/modules/sftp/default.auto.tfvars b/modules/sftp/default.auto.tfvars deleted file mode 100644 index 47f94fb9b..000000000 --- a/modules/sftp/default.auto.tfvars +++ /dev/null @@ -1 +0,0 @@ -enabled = false diff --git a/modules/sftp/provider-awsutils.mixin.tf b/modules/sftp/provider-awsutils.mixin.tf new file mode 100644 index 000000000..70fa8d095 --- /dev/null +++ b/modules/sftp/provider-awsutils.mixin.tf @@ -0,0 +1,14 @@ +provider "awsutils" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} diff --git a/modules/sftp/providers.tf b/modules/sftp/providers.tf index 8d0c33309..ef923e10a 100644 --- a/modules/sftp/providers.tf +++ b/modules/sftp/providers.tf @@ -1,25 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) - } - } -} - -provider "awsutils" { - region = var.region - - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null - - dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? 
[] : ["role"]
-    content {
-      role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn)
+      role_arn = assume_role.value
     }
   }
 }
@@ -28,9 +17,3 @@ module "iam_roles" {
   source  = "../account-map/modules/iam-roles"
   context = module.this.context
 }
-
-variable "import_role_arn" {
-  type        = string
-  default     = null
-  description = "IAM Role ARN to use when importing a resource"
-}
diff --git a/modules/sftp/remote-state.tf b/modules/sftp/remote-state.tf
index 1b1079219..757ef9067 100644
--- a/modules/sftp/remote-state.tf
+++ b/modules/sftp/remote-state.tf
@@ -1,6 +1,6 @@
 module "vpc" {
   source  = "cloudposse/stack-config/yaml//modules/remote-state"
-  version = "0.22.4"
+  version = "1.5.0"
 
   component = "vpc"
diff --git a/modules/sftp/versions.tf b/modules/sftp/versions.tf
index 32d228e73..fb4436329 100644
--- a/modules/sftp/versions.tf
+++ b/modules/sftp/versions.tf
@@ -4,7 +4,7 @@ terraform {
   required_providers {
     aws = {
       source  = "hashicorp/aws"
-      version = "~> 4.0"
+      version = ">= 4.0"
     }
     awsutils = {
       source = "cloudposse/awsutils"
diff --git a/modules/site-to-site-vpn/README.md b/modules/site-to-site-vpn/README.md
new file mode 100644
index 000000000..3e432621c
--- /dev/null
+++ b/modules/site-to-site-vpn/README.md
@@ -0,0 +1,236 @@
+---
+tags:
+  - component/site-to-site-vpn
+  - layer/network
+  - provider/aws
+---
+
+# Component: `site-to-site-vpn`
+
+This component provisions a [Site-To-Site VPN](https://aws.amazon.com/vpn/site-to-site-vpn/) with a
+target AWS VPC on one side of the tunnel.
+The other (customer) side can be any VPN gateway endpoint, e.g. a hardware device, other cloud VPN, etc.
+
+AWS Site-to-Site VPN is a fully-managed service that creates a secure connection between your data center or branch
+office and your AWS resources using IP Security (IPSec) tunnels. When using Site-to-Site VPN, you can connect to both
+your Amazon Virtual Private Clouds (VPC) and AWS Transit Gateway, and two tunnels per connection are used for
+increased redundancy.
+
+The component provisions the following resources:
+
+- AWS Virtual Private Gateway (a representation of the AWS side of the tunnel)
+
+- AWS Customer Gateway (a representation of the other (remote) side of the tunnel). It requires:
+  - The gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN)
+  - `/32` IP of the VPN endpoint
+
+- AWS Site-To-Site VPN connection. It creates two VPN tunnels for redundancy and requires:
+  - The IP CIDR ranges on each side of the tunnel
+  - Pre-shared Keys for each tunnel (can be auto-generated if not provided and saved into SSM Parameter Store)
+  - (Optional) IP CIDR ranges to be used inside each VPN tunnel
+
+- Route table entries to direct the appropriate traffic from the local VPC to the other side of the tunnel
+
+## Post-tunnel creation requirements
+
+Once the site-to-site VPN resources are deployed, you need to send the VPN configuration
+from the AWS side to the administrator of the remote side of the VPN connection. To do this:
+
+1. Determine the infrastructure that will be used for the remote side, specifically:
+
+   - Vendor
+   - Platform
+   - Software Version
+   - IKE version
+
+1. Log into the target AWS account
+1. Go to the "VPC" console
+1. On the left navigation menu, go to `Virtual Private Network` > `Site-to-Site VPN Connections`
+1. Select the VPN connection that was created via this component
+1. On the top right, click the `Download Configuration` button
+1. Enter the information you obtained and click `Download`
+1. Send the configuration file to the administrator of the remote side of the tunnel
+
+## Usage
+
+**Stack Level**: Regional
+
+```yaml
+components:
+  terraform:
+    site-to-site-vpn:
+      metadata:
+        component: site-to-site-vpn
+      vars:
+        enabled: true
+        name: "site-to-site-vpn"
+        vpc_component_name: vpc
+        customer_gateway_bgp_asn: 65000
+        customer_gateway_ip_address: 20.200.30.0
+        vpn_gateway_amazon_side_asn: 64512
+        vpn_connection_static_routes_only: true
+        vpn_connection_tunnel1_inside_cidr: 169.254.20.0/30
+        vpn_connection_tunnel2_inside_cidr: 169.254.21.0/30
+        vpn_connection_local_ipv4_network_cidr: 10.100.128.0/24
+        vpn_connection_remote_ipv4_network_cidr: 10.10.80.0/24
+        vpn_connection_static_routes_destinations:
+          - 10.100.128.0/24
+        vpn_connection_tunnel1_startup_action: add
+        vpn_connection_tunnel2_startup_action: add
+        transit_gateway_enabled: false
+        vpn_connection_tunnel1_cloudwatch_log_enabled: false
+        vpn_connection_tunnel2_cloudwatch_log_enabled: false
+        preshared_key_enabled: true
+        ssm_enabled: true
+        ssm_path_prefix: "/site-to-site-vpn"
+```
+
+## Amazon side Autonomous System Number (ASN)
+
+The variable `vpn_gateway_amazon_side_asn` (Amazon side Autonomous System Number) is not strictly required when
+creating an AWS VPN Gateway. If you do not specify the Amazon side ASN during the creation of the VPN Gateway, AWS
+will automatically assign a default ASN (which is 7224 for the Amazon side of the VPN).
+
+However, specifying the Amazon side ASN can be important if you need to integrate the VPN with an on-premises network
+that uses Border Gateway Protocol (BGP) and you want to avoid ASN conflicts or require a specific ASN for routing
+policies.
+
+If your use case involves BGP peering, and you need a specific ASN for the Amazon side, then you should explicitly set
+the `vpn_gateway_amazon_side_asn`. Otherwise, it can be omitted (set to `null`), and AWS will handle it automatically.
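+
+A minimal sketch of the two cases, reusing the stack layout from the Usage example above (the ASN value shown is
+illustrative only):
+
+```yaml
+components:
+  terraform:
+    site-to-site-vpn:
+      vars:
+        # Set an explicit Amazon side ASN when BGP peering with the on-premises network requires it
+        vpn_gateway_amazon_side_asn: 64512
+        # ...or leave it unset / set it to `null`, and AWS assigns the default Amazon side ASN (7224)
+        # vpn_gateway_amazon_side_asn: null
+```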
+ +## Provisioning + +Provision the `site-to-site-vpn` component by executing the following commands: + +```sh +atmos terraform plan site-to-site-vpn -s +atmos terraform apply site-to-site-vpn -s +``` + +## References + +- https://aws.amazon.com/vpn/site-to-site-vpn +- https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html +- https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_VpnTunnelOptionsSpecification.html + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 4.0 | +| [random](#requirement\_random) | >= 2.2 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | +| [random](#provider\_random) | >= 2.2 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [site\_to\_site\_vpn](#module\_site\_to\_site\_vpn) | cloudposse/vpn-connection/aws | 1.3.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_ssm_parameter.tunnel1_preshared_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource | +| [aws_ssm_parameter.tunnel2_preshared_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource | +| [random_password.tunnel1_preshared_key](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/password) | resource | +| [random_password.tunnel2_preshared_key](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/password) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [customer\_gateway\_bgp\_asn](#input\_customer\_gateway\_bgp\_asn) | The Customer Gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN) | `number` | n/a | yes | +| [customer\_gateway\_ip\_address](#input\_customer\_gateway\_ip\_address) | The IPv4 address for the Customer Gateway device's outside interface. Set to `null` to not create the Customer Gateway | `string` | `null` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [existing\_transit\_gateway\_id](#input\_existing\_transit\_gateway\_id) | Existing Transit Gateway ID. If provided, the module will not create a Virtual Private Gateway but instead will use the transit\_gateway. For setting up transit gateway we can use the cloudposse/transit-gateway/aws module and pass the output transit\_gateway\_id to this variable | `string` | `""` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [preshared\_key\_enabled](#input\_preshared\_key\_enabled) | Flag to enable adding the preshared keys to the VPN connection | `bool` | `true` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [ssm\_enabled](#input\_ssm\_enabled) | Flag to enable saving the `tunnel1_preshared_key` and `tunnel2_preshared_key` in the SSM Parameter Store | `bool` | `false` | no | +| [ssm\_path\_prefix](#input\_ssm\_path\_prefix) | SSM Key path prefix for the associated SSM parameters | `string` | `""` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [transit\_gateway\_enabled](#input\_transit\_gateway\_enabled) | Set to true to enable VPN connection to transit gateway and then pass in the existing\_transit\_gateway\_id | `bool` | `false` | no | +| [transit\_gateway\_route\_table\_id](#input\_transit\_gateway\_route\_table\_id) | The ID of the route table for the transit gateway that you want to associate + propagate the VPN connection's TGW attachment | `string` | `null` | no | +| [transit\_gateway\_routes](#input\_transit\_gateway\_routes) | A map of transit gateway routes to create on the given TGW route table (via `transit_gateway_route_table_id`) for the created VPN Attachment. Use the key in the map to describe the route |
map(object({
blackhole = optional(bool, false)
destination_cidr_block = string
}))
| `{}` | no | +| [vpc\_component\_name](#input\_vpc\_component\_name) | Atmos VPC component name | `string` | `"vpc"` | no | +| [vpn\_connection\_local\_ipv4\_network\_cidr](#input\_vpn\_connection\_local\_ipv4\_network\_cidr) | The IPv4 CIDR on the Customer Gateway (on-premises) side of the VPN connection | `string` | `"0.0.0.0/0"` | no | +| [vpn\_connection\_log\_retention\_in\_days](#input\_vpn\_connection\_log\_retention\_in\_days) | Specifies the number of days you want to retain log events | `number` | `30` | no | +| [vpn\_connection\_remote\_ipv4\_network\_cidr](#input\_vpn\_connection\_remote\_ipv4\_network\_cidr) | The IPv4 CIDR on the AWS side of the VPN connection | `string` | `"0.0.0.0/0"` | no | +| [vpn\_connection\_static\_routes\_destinations](#input\_vpn\_connection\_static\_routes\_destinations) | List of CIDR blocks to be used as destination for static routes. Routes to destinations will be propagated to the VPC route tables | `list(string)` | `[]` | no | +| [vpn\_connection\_static\_routes\_only](#input\_vpn\_connection\_static\_routes\_only) | If set to `true`, the VPN connection will use static routes exclusively. Static routes must be used for devices that don't support BGP | `bool` | `false` | no | +| [vpn\_connection\_tunnel1\_cloudwatch\_log\_enabled](#input\_vpn\_connection\_tunnel1\_cloudwatch\_log\_enabled) | Enable or disable VPN tunnel logging feature for the tunnel | `bool` | `false` | no | +| [vpn\_connection\_tunnel1\_cloudwatch\_log\_output\_format](#input\_vpn\_connection\_tunnel1\_cloudwatch\_log\_output\_format) | Set log format for the tunnel. Default format is json. Possible values are `json` and `text` | `string` | `"json"` | no | +| [vpn\_connection\_tunnel1\_dpd\_timeout\_action](#input\_vpn\_connection\_tunnel1\_dpd\_timeout\_action) | The action to take after DPD timeout occurs for the first VPN tunnel. Specify restart to restart the IKE initiation. Specify `clear` to end the IKE session. Valid values are `clear` \| `none` \| `restart` | `string` | `"clear"` | no | +| [vpn\_connection\_tunnel1\_ike\_versions](#input\_vpn\_connection\_tunnel1\_ike\_versions) | The IKE versions that are permitted for the first VPN tunnel. Valid values are ikev1 \| ikev2 | `list(string)` | `[]` | no | +| [vpn\_connection\_tunnel1\_inside\_cidr](#input\_vpn\_connection\_tunnel1\_inside\_cidr) | The CIDR block of the inside IP addresses for the first VPN tunnel | `string` | `null` | no | +| [vpn\_connection\_tunnel1\_phase1\_dh\_group\_numbers](#input\_vpn\_connection\_tunnel1\_phase1\_dh\_group\_numbers) | List of one or more Diffie-Hellman group numbers that are permitted for the first VPN tunnel for phase 1 IKE negotiations. Valid values are 2 \| 5 \| 14 \| 15 \| 16 \| 17 \| 18 \| 19 \| 20 \| 21 \| 22 \| 23 \| 24 | `list(string)` | `[]` | no | +| [vpn\_connection\_tunnel1\_phase1\_encryption\_algorithms](#input\_vpn\_connection\_tunnel1\_phase1\_encryption\_algorithms) | List of one or more encryption algorithms that are permitted for the first VPN tunnel for phase 1 IKE negotiations. Valid values are AES128 \| AES256 \| AES128-GCM-16 \| AES256-GCM-16 | `list(string)` | `[]` | no | +| [vpn\_connection\_tunnel1\_phase1\_integrity\_algorithms](#input\_vpn\_connection\_tunnel1\_phase1\_integrity\_algorithms) | One or more integrity algorithms that are permitted for the first VPN tunnel for phase 1 IKE negotiations. 
Valid values are SHA1 \| SHA2-256 \| SHA2-384 \| SHA2-512 | `list(string)` | `[]` | no | +| [vpn\_connection\_tunnel1\_phase2\_dh\_group\_numbers](#input\_vpn\_connection\_tunnel1\_phase2\_dh\_group\_numbers) | List of one or more Diffie-Hellman group numbers that are permitted for the first VPN tunnel for phase 2 IKE negotiations. Valid values are 2 \| 5 \| 14 \| 15 \| 16 \| 17 \| 18 \| 19 \| 20 \| 21 \| 22 \| 23 \| 24 | `list(string)` | `[]` | no | +| [vpn\_connection\_tunnel1\_phase2\_encryption\_algorithms](#input\_vpn\_connection\_tunnel1\_phase2\_encryption\_algorithms) | List of one or more encryption algorithms that are permitted for the first VPN tunnel for phase 2 IKE negotiations. Valid values are AES128 \| AES256 \| AES128-GCM-16 \| AES256-GCM-16 | `list(string)` | `[]` | no | +| [vpn\_connection\_tunnel1\_phase2\_integrity\_algorithms](#input\_vpn\_connection\_tunnel1\_phase2\_integrity\_algorithms) | One or more integrity algorithms that are permitted for the first VPN tunnel for phase 2 IKE negotiations. Valid values are SHA1 \| SHA2-256 \| SHA2-384 \| SHA2-512 | `list(string)` | `[]` | no | +| [vpn\_connection\_tunnel1\_preshared\_key](#input\_vpn\_connection\_tunnel1\_preshared\_key) | The preshared key of the first VPN tunnel. The preshared key must be between 8 and 64 characters in length and cannot start with zero. Allowed characters are alphanumeric characters, periods(.) and underscores(\_) | `string` | `null` | no | +| [vpn\_connection\_tunnel1\_startup\_action](#input\_vpn\_connection\_tunnel1\_startup\_action) | The action to take when the establishing the tunnel for the first VPN connection. By default, your customer gateway device must initiate the IKE negotiation and bring up the tunnel. Specify `start` for AWS to initiate the IKE negotiation. Valid values are `add` \| `start` | `string` | `"add"` | no | +| [vpn\_connection\_tunnel2\_cloudwatch\_log\_enabled](#input\_vpn\_connection\_tunnel2\_cloudwatch\_log\_enabled) | Enable or disable VPN tunnel logging feature for the tunnel | `bool` | `false` | no | +| [vpn\_connection\_tunnel2\_cloudwatch\_log\_output\_format](#input\_vpn\_connection\_tunnel2\_cloudwatch\_log\_output\_format) | Set log format for the tunnel. Default format is json. Possible values are `json` and `text` | `string` | `"json"` | no | +| [vpn\_connection\_tunnel2\_dpd\_timeout\_action](#input\_vpn\_connection\_tunnel2\_dpd\_timeout\_action) | The action to take after DPD timeout occurs for the second VPN tunnel. Specify restart to restart the IKE initiation. Specify clear to end the IKE session. Valid values are `clear` \| `none` \| `restart` | `string` | `"clear"` | no | +| [vpn\_connection\_tunnel2\_ike\_versions](#input\_vpn\_connection\_tunnel2\_ike\_versions) | The IKE versions that are permitted for the second VPN tunnel. Valid values are ikev1 \| ikev2 | `list(string)` | `[]` | no | +| [vpn\_connection\_tunnel2\_inside\_cidr](#input\_vpn\_connection\_tunnel2\_inside\_cidr) | The CIDR block of the inside IP addresses for the second VPN tunnel | `string` | `null` | no | +| [vpn\_connection\_tunnel2\_phase1\_dh\_group\_numbers](#input\_vpn\_connection\_tunnel2\_phase1\_dh\_group\_numbers) | List of one or more Diffie-Hellman group numbers that are permitted for the second VPN tunnel for phase 1 IKE negotiations. 
Valid values are 2 \| 5 \| 14 \| 15 \| 16 \| 17 \| 18 \| 19 \| 20 \| 21 \| 22 \| 23 \| 24 | `list(string)` | `[]` | no | +| [vpn\_connection\_tunnel2\_phase1\_encryption\_algorithms](#input\_vpn\_connection\_tunnel2\_phase1\_encryption\_algorithms) | List of one or more encryption algorithms that are permitted for the second VPN tunnel for phase 1 IKE negotiations. Valid values are AES128 \| AES256 \| AES128-GCM-16 \| AES256-GCM-16 | `list(string)` | `[]` | no | +| [vpn\_connection\_tunnel2\_phase1\_integrity\_algorithms](#input\_vpn\_connection\_tunnel2\_phase1\_integrity\_algorithms) | One or more integrity algorithms that are permitted for the second VPN tunnel for phase 1 IKE negotiations. Valid values are SHA1 \| SHA2-256 \| SHA2-384 \| SHA2-512 | `list(string)` | `[]` | no | +| [vpn\_connection\_tunnel2\_phase2\_dh\_group\_numbers](#input\_vpn\_connection\_tunnel2\_phase2\_dh\_group\_numbers) | List of one or more Diffie-Hellman group numbers that are permitted for the second VPN tunnel for phase 2 IKE negotiations. Valid values are 2 \| 5 \| 14 \| 15 \| 16 \| 17 \| 18 \| 19 \| 20 \| 21 \| 22 \| 23 \| 24 | `list(string)` | `[]` | no | +| [vpn\_connection\_tunnel2\_phase2\_encryption\_algorithms](#input\_vpn\_connection\_tunnel2\_phase2\_encryption\_algorithms) | List of one or more encryption algorithms that are permitted for the second VPN tunnel for phase 2 IKE negotiations. Valid values are AES128 \| AES256 \| AES128-GCM-16 \| AES256-GCM-16 | `list(string)` | `[]` | no | +| [vpn\_connection\_tunnel2\_phase2\_integrity\_algorithms](#input\_vpn\_connection\_tunnel2\_phase2\_integrity\_algorithms) | One or more integrity algorithms that are permitted for the second VPN tunnel for phase 2 IKE negotiations. Valid values are SHA1 \| SHA2-256 \| SHA2-384 \| SHA2-512 | `list(string)` | `[]` | no | +| [vpn\_connection\_tunnel2\_preshared\_key](#input\_vpn\_connection\_tunnel2\_preshared\_key) | The preshared key of the second VPN tunnel. The preshared key must be between 8 and 64 characters in length and cannot start with zero. Allowed characters are alphanumeric characters, periods(.) and underscores(\_) | `string` | `null` | no | +| [vpn\_connection\_tunnel2\_startup\_action](#input\_vpn\_connection\_tunnel2\_startup\_action) | The action to take when the establishing the tunnel for the second VPN connection. By default, your customer gateway device must initiate the IKE negotiation and bring up the tunnel. Specify `start` for AWS to initiate the IKE negotiation. Valid values are `add` \| `start` | `string` | `"add"` | no | +| [vpn\_gateway\_amazon\_side\_asn](#input\_vpn\_gateway\_amazon\_side\_asn) | The Autonomous System Number (ASN) for the Amazon side of the VPN Gateway. 
If you don't specify an ASN, the Virtual Private Gateway is created with the default ASN | `number` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [customer\_gateway\_id](#output\_customer\_gateway\_id) | Customer Gateway ID | +| [vpn\_connection\_customer\_gateway\_configuration](#output\_vpn\_connection\_customer\_gateway\_configuration) | The configuration information for the VPN connection's Customer Gateway (in the native XML format) | +| [vpn\_connection\_id](#output\_vpn\_connection\_id) | VPN Connection ID | +| [vpn\_connection\_tunnel1\_address](#output\_vpn\_connection\_tunnel1\_address) | The public IP address of the first VPN tunnel | +| [vpn\_connection\_tunnel1\_cgw\_inside\_address](#output\_vpn\_connection\_tunnel1\_cgw\_inside\_address) | The RFC 6890 link-local address of the first VPN tunnel (Customer Gateway side) | +| [vpn\_connection\_tunnel1\_vgw\_inside\_address](#output\_vpn\_connection\_tunnel1\_vgw\_inside\_address) | The RFC 6890 link-local address of the first VPN tunnel (Virtual Private Gateway side) | +| [vpn\_connection\_tunnel2\_address](#output\_vpn\_connection\_tunnel2\_address) | The public IP address of the second VPN tunnel | +| [vpn\_connection\_tunnel2\_cgw\_inside\_address](#output\_vpn\_connection\_tunnel2\_cgw\_inside\_address) | The RFC 6890 link-local address of the second VPN tunnel (Customer Gateway side) | +| [vpn\_connection\_tunnel2\_vgw\_inside\_address](#output\_vpn\_connection\_tunnel2\_vgw\_inside\_address) | The RFC 6890 link-local address of the second VPN tunnel (Virtual Private Gateway side) | +| [vpn\_gateway\_id](#output\_vpn\_gateway\_id) | Virtual Private Gateway ID | + + + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/site-to-site-vpn) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/site-to-site-vpn/context.tf b/modules/site-to-site-vpn/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/site-to-site-vpn/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/site-to-site-vpn/main.tf b/modules/site-to-site-vpn/main.tf new file mode 100644 index 000000000..e930bb5af --- /dev/null +++ b/modules/site-to-site-vpn/main.tf @@ -0,0 +1,84 @@ +locals { + enabled = module.this.enabled + vpc_outputs = module.vpc.outputs + + preshared_key_enabled = local.enabled && var.preshared_key_enabled + + tunnel1_preshared_key = local.preshared_key_enabled ? ( + length(var.vpn_connection_tunnel1_preshared_key) > 0 ? var.vpn_connection_tunnel1_preshared_key : + one(random_password.tunnel1_preshared_key[*].result) + ) : null + + tunnel2_preshared_key = local.preshared_key_enabled ? ( + length(var.vpn_connection_tunnel2_preshared_key) > 0 ? 
var.vpn_connection_tunnel2_preshared_key : + one(random_password.tunnel2_preshared_key[*].result) + ) : null +} + +module "site_to_site_vpn" { + source = "cloudposse/vpn-connection/aws" + version = "1.3.0" + + vpc_id = local.vpc_outputs.vpc_id + vpn_gateway_amazon_side_asn = var.vpn_gateway_amazon_side_asn + customer_gateway_bgp_asn = var.customer_gateway_bgp_asn + customer_gateway_ip_address = var.customer_gateway_ip_address + route_table_ids = local.vpc_outputs.private_route_table_ids + vpn_connection_static_routes_only = var.vpn_connection_static_routes_only + vpn_connection_static_routes_destinations = var.vpn_connection_static_routes_destinations + vpn_connection_tunnel1_inside_cidr = var.vpn_connection_tunnel1_inside_cidr + vpn_connection_tunnel2_inside_cidr = var.vpn_connection_tunnel2_inside_cidr + vpn_connection_tunnel1_preshared_key = local.tunnel1_preshared_key + vpn_connection_tunnel2_preshared_key = local.tunnel2_preshared_key + vpn_connection_local_ipv4_network_cidr = var.vpn_connection_local_ipv4_network_cidr + vpn_connection_remote_ipv4_network_cidr = var.vpn_connection_remote_ipv4_network_cidr + vpn_connection_tunnel1_ike_versions = var.vpn_connection_tunnel1_ike_versions + vpn_connection_tunnel2_ike_versions = var.vpn_connection_tunnel2_ike_versions + vpn_connection_tunnel1_phase1_encryption_algorithms = var.vpn_connection_tunnel1_phase1_encryption_algorithms + vpn_connection_tunnel1_phase2_encryption_algorithms = var.vpn_connection_tunnel1_phase2_encryption_algorithms + vpn_connection_tunnel1_phase1_integrity_algorithms = var.vpn_connection_tunnel1_phase1_integrity_algorithms + vpn_connection_tunnel1_phase2_integrity_algorithms = var.vpn_connection_tunnel1_phase2_integrity_algorithms + vpn_connection_tunnel2_phase1_encryption_algorithms = var.vpn_connection_tunnel2_phase1_encryption_algorithms + vpn_connection_tunnel2_phase2_encryption_algorithms = var.vpn_connection_tunnel2_phase2_encryption_algorithms + vpn_connection_tunnel2_phase1_integrity_algorithms = var.vpn_connection_tunnel2_phase1_integrity_algorithms + vpn_connection_tunnel2_phase2_integrity_algorithms = var.vpn_connection_tunnel2_phase2_integrity_algorithms + vpn_connection_tunnel1_phase1_dh_group_numbers = var.vpn_connection_tunnel1_phase1_dh_group_numbers + vpn_connection_tunnel1_phase2_dh_group_numbers = var.vpn_connection_tunnel1_phase2_dh_group_numbers + vpn_connection_tunnel2_phase1_dh_group_numbers = var.vpn_connection_tunnel2_phase1_dh_group_numbers + vpn_connection_tunnel2_phase2_dh_group_numbers = var.vpn_connection_tunnel2_phase2_dh_group_numbers + vpn_connection_tunnel1_startup_action = var.vpn_connection_tunnel1_startup_action + vpn_connection_tunnel2_startup_action = var.vpn_connection_tunnel2_startup_action + vpn_connection_log_retention_in_days = var.vpn_connection_log_retention_in_days + vpn_connection_tunnel1_dpd_timeout_action = var.vpn_connection_tunnel1_dpd_timeout_action + vpn_connection_tunnel2_dpd_timeout_action = var.vpn_connection_tunnel2_dpd_timeout_action + vpn_connection_tunnel1_cloudwatch_log_enabled = var.vpn_connection_tunnel1_cloudwatch_log_enabled + vpn_connection_tunnel2_cloudwatch_log_enabled = var.vpn_connection_tunnel2_cloudwatch_log_enabled + vpn_connection_tunnel1_cloudwatch_log_output_format = var.vpn_connection_tunnel1_cloudwatch_log_output_format + vpn_connection_tunnel2_cloudwatch_log_output_format = var.vpn_connection_tunnel2_cloudwatch_log_output_format + transit_gateway_enabled = var.transit_gateway_enabled + existing_transit_gateway_id = 
var.existing_transit_gateway_id
+  transit_gateway_route_table_id                      = var.transit_gateway_route_table_id
+  transit_gateway_routes                              = var.transit_gateway_routes
+
+  context = module.this.context
+}
+
+resource "random_password" "tunnel1_preshared_key" {
+  count = local.preshared_key_enabled && length(var.vpn_connection_tunnel1_preshared_key) == 0 ? 1 : 0
+
+  length = 60
+  # Leave special characters out to avoid quoting and other issues.
+  # Special characters have no additional security compared to increasing length.
+  special          = false
+  override_special = "!#$%^&*()<>-_"
+}
+
+resource "random_password" "tunnel2_preshared_key" {
+  count = local.preshared_key_enabled && length(var.vpn_connection_tunnel2_preshared_key) == 0 ? 1 : 0
+
+  length = 60
+  # Leave special characters out to avoid quoting and other issues.
+  # Special characters have no additional security compared to increasing length.
+  special          = false
+  override_special = "!#$%^&*()<>-_"
+}
diff --git a/modules/site-to-site-vpn/outputs.tf b/modules/site-to-site-vpn/outputs.tf
new file mode 100644
index 000000000..ab2251f39
--- /dev/null
+++ b/modules/site-to-site-vpn/outputs.tf
@@ -0,0 +1,50 @@
+output "vpn_gateway_id" {
+  description = "Virtual Private Gateway ID"
+  value       = module.site_to_site_vpn.vpn_gateway_id
+}
+
+output "customer_gateway_id" {
+  description = "Customer Gateway ID"
+  value       = module.site_to_site_vpn.customer_gateway_id
+}
+
+output "vpn_connection_id" {
+  description = "VPN Connection ID"
+  value       = module.site_to_site_vpn.vpn_connection_id
+}
+
+output "vpn_connection_customer_gateway_configuration" {
+  description = "The configuration information for the VPN connection's Customer Gateway (in the native XML format)"
+  sensitive   = true
+  value       = module.site_to_site_vpn.vpn_connection_customer_gateway_configuration
+}
+
+output "vpn_connection_tunnel1_address" {
+  description = "The public IP address of the first VPN tunnel"
+  value       = module.site_to_site_vpn.vpn_connection_tunnel1_address
+}
+
+output "vpn_connection_tunnel1_cgw_inside_address" {
+  description = "The RFC 6890 link-local address of the first VPN tunnel (Customer Gateway side)"
+  value       = module.site_to_site_vpn.vpn_connection_tunnel1_cgw_inside_address
+}
+
+output "vpn_connection_tunnel1_vgw_inside_address" {
+  description = "The RFC 6890 link-local address of the first VPN tunnel (Virtual Private Gateway side)"
+  value       = module.site_to_site_vpn.vpn_connection_tunnel1_vgw_inside_address
+}
+
+output "vpn_connection_tunnel2_address" {
+  description = "The public IP address of the second VPN tunnel"
+  value       = module.site_to_site_vpn.vpn_connection_tunnel2_address
+}
+
+output "vpn_connection_tunnel2_cgw_inside_address" {
+  description = "The RFC 6890 link-local address of the second VPN tunnel (Customer Gateway side)"
+  value       = module.site_to_site_vpn.vpn_connection_tunnel2_cgw_inside_address
+}
+
+output "vpn_connection_tunnel2_vgw_inside_address" {
+  description = "The RFC 6890 link-local address of the second VPN tunnel (Virtual Private Gateway side)"
+  value       = module.site_to_site_vpn.vpn_connection_tunnel2_vgw_inside_address
+}
diff --git a/modules/site-to-site-vpn/providers.tf b/modules/site-to-site-vpn/providers.tf
new file mode 100644
index 000000000..ef923e10a
--- /dev/null
+++ b/modules/site-to-site-vpn/providers.tf
@@ -0,0 +1,19 @@
+provider "aws" {
+  region = var.region
+
+  # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null.
+  profile = module.iam_roles.terraform_profile_name
+
+  dynamic "assume_role" {
+    # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role.
+    for_each = compact([module.iam_roles.terraform_role_arn])
+    content {
+      role_arn = assume_role.value
+    }
+  }
+}
+
+module "iam_roles" {
+  source  = "../account-map/modules/iam-roles"
+  context = module.this.context
+}
diff --git a/modules/site-to-site-vpn/remote-state.tf b/modules/site-to-site-vpn/remote-state.tf
new file mode 100644
index 000000000..4e2391525
--- /dev/null
+++ b/modules/site-to-site-vpn/remote-state.tf
@@ -0,0 +1,8 @@
+module "vpc" {
+  source  = "cloudposse/stack-config/yaml//modules/remote-state"
+  version = "1.5.0"
+
+  component = var.vpc_component_name
+
+  context = module.this.context
+}
diff --git a/modules/site-to-site-vpn/ssm.tf b/modules/site-to-site-vpn/ssm.tf
new file mode 100644
index 000000000..281dfee89
--- /dev/null
+++ b/modules/site-to-site-vpn/ssm.tf
@@ -0,0 +1,25 @@
+locals {
+  ssm_enabled = local.enabled && var.ssm_enabled
+}
+
+resource "aws_ssm_parameter" "tunnel1_preshared_key" {
+  count = local.ssm_enabled && local.preshared_key_enabled ? 1 : 0
+
+  name        = format("%s/%s", var.ssm_path_prefix, "tunnel1_preshared_key")
+  value       = local.tunnel1_preshared_key
+  description = format("Preshared Key for Tunnel1 in the %s Site-to-Site VPN connection", module.this.id)
+  type        = "SecureString"
+
+  tags = module.this.tags
+}
+
+resource "aws_ssm_parameter" "tunnel2_preshared_key" {
+  count = local.ssm_enabled && local.preshared_key_enabled ? 1 : 0
+
+  name        = format("%s/%s", var.ssm_path_prefix, "tunnel2_preshared_key")
+  value       = local.tunnel2_preshared_key
+  description = format("Preshared Key for Tunnel2 in the %s Site-to-Site VPN connection", module.this.id)
+  type        = "SecureString"
+
+  tags = module.this.tags
+}
diff --git a/modules/site-to-site-vpn/variables.tf b/modules/site-to-site-vpn/variables.tf
new file mode 100644
index 000000000..6ef29e23e
--- /dev/null
+++ b/modules/site-to-site-vpn/variables.tf
@@ -0,0 +1,292 @@
+variable "region" {
+  type        = string
+  description = "AWS Region"
+  nullable    = false
+}
+
+variable "vpc_component_name" {
+  type        = string
+  description = "Atmos VPC component name"
+  default     = "vpc"
+  nullable    = false
+}
+
+variable "customer_gateway_bgp_asn" {
+  type        = number
+  description = "The Customer Gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN)"
+  nullable    = false
+}
+
+variable "customer_gateway_ip_address" {
+  type        = string
+  description = "The IPv4 address for the Customer Gateway device's outside interface. Set to `null` to not create the Customer Gateway"
+  default     = null
+}
+
+variable "vpn_gateway_amazon_side_asn" {
+  type        = number
+  description = "The Autonomous System Number (ASN) for the Amazon side of the VPN Gateway. If you don't specify an ASN, the Virtual Private Gateway is created with the default ASN"
+  default     = null
+}
+
+variable "vpn_connection_static_routes_only" {
+  type        = bool
+  description = "If set to `true`, the VPN connection will use static routes exclusively. Static routes must be used for devices that don't support BGP"
+  default     = false
+  nullable    = false
+}
+
+variable "vpn_connection_static_routes_destinations" {
+  type        = list(string)
+  description = "List of CIDR blocks to be used as destination for static routes. 
Routes to destinations will be propagated to the VPC route tables" + default = [] + nullable = false +} + +variable "vpn_connection_local_ipv4_network_cidr" { + type = string + description = "The IPv4 CIDR on the Customer Gateway (on-premises) side of the VPN connection" + default = "0.0.0.0/0" +} + +variable "vpn_connection_remote_ipv4_network_cidr" { + type = string + description = "The IPv4 CIDR on the AWS side of the VPN connection" + default = "0.0.0.0/0" +} + +variable "vpn_connection_log_retention_in_days" { + type = number + description = "Specifies the number of days you want to retain log events" + default = 30 + nullable = false +} + +variable "vpn_connection_tunnel1_dpd_timeout_action" { + type = string + description = "The action to take after DPD timeout occurs for the first VPN tunnel. Specify restart to restart the IKE initiation. Specify `clear` to end the IKE session. Valid values are `clear` | `none` | `restart`" + default = "clear" + nullable = false +} + +variable "vpn_connection_tunnel1_ike_versions" { + type = list(string) + description = "The IKE versions that are permitted for the first VPN tunnel. Valid values are ikev1 | ikev2" + default = [] + nullable = false +} + +variable "vpn_connection_tunnel1_inside_cidr" { + type = string + description = "The CIDR block of the inside IP addresses for the first VPN tunnel" + default = null +} + +variable "vpn_connection_tunnel1_phase1_encryption_algorithms" { + type = list(string) + description = "List of one or more encryption algorithms that are permitted for the first VPN tunnel for phase 1 IKE negotiations. Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16" + default = [] + nullable = false +} + +variable "vpn_connection_tunnel1_phase2_encryption_algorithms" { + type = list(string) + description = "List of one or more encryption algorithms that are permitted for the first VPN tunnel for phase 2 IKE negotiations. Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16" + default = [] + nullable = false +} + +variable "vpn_connection_tunnel1_phase1_integrity_algorithms" { + type = list(string) + description = "One or more integrity algorithms that are permitted for the first VPN tunnel for phase 1 IKE negotiations. Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512" + default = [] + nullable = false +} + +variable "vpn_connection_tunnel1_phase2_integrity_algorithms" { + type = list(string) + description = "One or more integrity algorithms that are permitted for the first VPN tunnel for phase 2 IKE negotiations. Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512" + default = [] + nullable = false +} + +variable "vpn_connection_tunnel1_phase1_dh_group_numbers" { + type = list(string) + description = "List of one or more Diffie-Hellman group numbers that are permitted for the first VPN tunnel for phase 1 IKE negotiations. Valid values are 2 | 5 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24" + default = [] + nullable = false +} + +variable "vpn_connection_tunnel1_phase2_dh_group_numbers" { + type = list(string) + description = "List of one or more Diffie-Hellman group numbers that are permitted for the first VPN tunnel for phase 2 IKE negotiations. Valid values are 2 | 5 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24" + default = [] + nullable = false +} + +variable "vpn_connection_tunnel1_preshared_key" { + type = string + description = "The preshared key of the first VPN tunnel. The preshared key must be between 8 and 64 characters in length and cannot start with zero. 
Allowed characters are alphanumeric characters, periods(.) and underscores(_)" + default = null +} + +variable "vpn_connection_tunnel1_startup_action" { + type = string + description = "The action to take when the establishing the tunnel for the first VPN connection. By default, your customer gateway device must initiate the IKE negotiation and bring up the tunnel. Specify `start` for AWS to initiate the IKE negotiation. Valid values are `add` | `start`" + default = "add" + nullable = false +} + +variable "vpn_connection_tunnel1_cloudwatch_log_enabled" { + type = bool + description = "Enable or disable VPN tunnel logging feature for the tunnel" + default = false + nullable = false +} + +variable "vpn_connection_tunnel1_cloudwatch_log_output_format" { + type = string + description = "Set log format for the tunnel. Default format is json. Possible values are `json` and `text`" + default = "json" + nullable = false +} + +variable "vpn_connection_tunnel2_dpd_timeout_action" { + type = string + description = "The action to take after DPD timeout occurs for the second VPN tunnel. Specify restart to restart the IKE initiation. Specify clear to end the IKE session. Valid values are `clear` | `none` | `restart`" + default = "clear" + nullable = false +} + +variable "vpn_connection_tunnel2_ike_versions" { + type = list(string) + description = "The IKE versions that are permitted for the second VPN tunnel. Valid values are ikev1 | ikev2" + default = [] + nullable = false +} + +variable "vpn_connection_tunnel2_inside_cidr" { + type = string + description = "The CIDR block of the inside IP addresses for the second VPN tunnel" + default = null +} + +variable "vpn_connection_tunnel2_phase1_encryption_algorithms" { + type = list(string) + description = "List of one or more encryption algorithms that are permitted for the second VPN tunnel for phase 1 IKE negotiations. Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16" + default = [] + nullable = false +} + +variable "vpn_connection_tunnel2_phase2_encryption_algorithms" { + type = list(string) + description = "List of one or more encryption algorithms that are permitted for the second VPN tunnel for phase 2 IKE negotiations. Valid values are AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16" + default = [] + nullable = false +} + +variable "vpn_connection_tunnel2_phase1_integrity_algorithms" { + type = list(string) + description = "One or more integrity algorithms that are permitted for the second VPN tunnel for phase 1 IKE negotiations. Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512" + default = [] + nullable = false +} + +variable "vpn_connection_tunnel2_phase2_integrity_algorithms" { + type = list(string) + description = "One or more integrity algorithms that are permitted for the second VPN tunnel for phase 2 IKE negotiations. Valid values are SHA1 | SHA2-256 | SHA2-384 | SHA2-512" + default = [] + nullable = false +} + +variable "vpn_connection_tunnel2_phase1_dh_group_numbers" { + type = list(string) + description = "List of one or more Diffie-Hellman group numbers that are permitted for the second VPN tunnel for phase 1 IKE negotiations. Valid values are 2 | 5 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24" + default = [] + nullable = false +} + +variable "vpn_connection_tunnel2_phase2_dh_group_numbers" { + type = list(string) + description = "List of one or more Diffie-Hellman group numbers that are permitted for the second VPN tunnel for phase 2 IKE negotiations. 
Valid values are 2 | 5 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24" + default = [] + nullable = false +} + +variable "vpn_connection_tunnel2_preshared_key" { + type = string + description = "The preshared key of the second VPN tunnel. The preshared key must be between 8 and 64 characters in length and cannot start with zero. Allowed characters are alphanumeric characters, periods(.) and underscores(_)" + default = null +} + +variable "vpn_connection_tunnel2_startup_action" { + type = string + description = "The action to take when the establishing the tunnel for the second VPN connection. By default, your customer gateway device must initiate the IKE negotiation and bring up the tunnel. Specify `start` for AWS to initiate the IKE negotiation. Valid values are `add` | `start`" + default = "add" + nullable = false +} + +variable "vpn_connection_tunnel2_cloudwatch_log_enabled" { + type = bool + description = "Enable or disable VPN tunnel logging feature for the tunnel" + default = false + nullable = false +} + +variable "vpn_connection_tunnel2_cloudwatch_log_output_format" { + type = string + description = "Set log format for the tunnel. Default format is json. Possible values are `json` and `text`" + default = "json" + nullable = false +} + +variable "existing_transit_gateway_id" { + type = string + default = "" + description = "Existing Transit Gateway ID. If provided, the module will not create a Virtual Private Gateway but instead will use the transit_gateway. For setting up transit gateway we can use the cloudposse/transit-gateway/aws module and pass the output transit_gateway_id to this variable" +} + +variable "transit_gateway_enabled" { + type = bool + description = "Set to true to enable VPN connection to transit gateway and then pass in the existing_transit_gateway_id" + default = false + nullable = false +} + +variable "transit_gateway_route_table_id" { + type = string + description = "The ID of the route table for the transit gateway that you want to associate + propagate the VPN connection's TGW attachment" + default = null +} + +variable "transit_gateway_routes" { + type = map(object({ + blackhole = optional(bool, false) + destination_cidr_block = string + })) + description = "A map of transit gateway routes to create on the given TGW route table (via `transit_gateway_route_table_id`) for the created VPN Attachment. 
Use the key in the map to describe the route" + default = {} + nullable = false +} + +variable "preshared_key_enabled" { + type = bool + description = "Flag to enable adding the preshared keys to the VPN connection" + default = true + nullable = false +} + +variable "ssm_enabled" { + type = bool + description = "Flag to enable saving the `tunnel1_preshared_key` and `tunnel2_preshared_key` in the SSM Parameter Store" + default = false + nullable = false +} + +variable "ssm_path_prefix" { + type = string + description = "SSM Key path prefix for the associated SSM parameters" + default = "" + nullable = false +} diff --git a/modules/site-to-site-vpn/versions.tf b/modules/site-to-site-vpn/versions.tf new file mode 100644 index 000000000..2445498a0 --- /dev/null +++ b/modules/site-to-site-vpn/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.3.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + random = { + source = "hashicorp/random" + version = ">= 2.2" + } + } +} diff --git a/modules/snowflake-account/README.md b/modules/snowflake-account/README.md index 1e6398999..e290c5231 100644 --- a/modules/snowflake-account/README.md +++ b/modules/snowflake-account/README.md @@ -1,16 +1,27 @@ +--- +tags: + - component/snowflake-account + - layer/unassigned + - provider/aws + - provider/snowflake +--- + # Component: `snowflake-account` -This component sets up the requirements for all other Snowflake components, including creating the Terraform service user. Before running this component, follow the manual, Click-Ops steps below to create a Snowflake subscription. +This component sets up the requirements for all other Snowflake components, including creating the Terraform service +user. Before running this component, follow the manual, Click-Ops steps below to create a Snowflake subscription. ## Deployment Steps 1. Open the AWS Console for the given stack. 2. Go to AWS Marketplace Subscriptions. -3. Click "Manage Subscriptions", click "Discover products", type "Snowflake" in the search bar. +3. Click "Manage Subscriptions", click "Discover products", type "Snowflake" in the search bar. 4. Select "Snowflake Data Cloud" 5. Click "Continue to Subscribe" -6. Fill out the information steps using the following as an example. Note, the provided email cannot use labels such as `mdev+sbx01@example.com`. +6. Fill out the information steps using the following as an example. Note, the provided email cannot use labels such as + `mdev+sbx01@example.com`. + ``` First Name: John Last Name: Smith @@ -18,20 +29,29 @@ This component sets up the requirements for all other Snowflake components, incl Company: Example Country: United States ``` -7. Select "Standard" and the current region. In this example, we chose "US East (Ohio)" which is the same as `us-east-1`. -7. Continue and wait for Sign Up to complete. Note the Snowflake account ID; you can find this in the newly accessible Snowflake console in the top right of the window. -8. Check for the Account Activation email. Note, this may be collected in a Slack notifications channel for easy access. -9. Follow the given link to create the Admin user with username `admin` and a strong password. Be sure to save that password somewhere secure. -10. Upload that password to AWS Parameter Store under `/snowflake/$ACCOUNT/users/admin/password`, where `ACCOUNT` is the value given during the subscription process. This password will only be used to create a private key, and all other authentication will be done with said key. 
Below is an example of how to do that with a [chamber](https://github.com/segmentio/chamber) command: + +7. Select "Standard" and the current region. In this example, we chose "US East (Ohio)" which is the same as + `us-east-1`. +8. Continue and wait for Sign Up to complete. Note the Snowflake account ID; you can find this in the newly accessible + Snowflake console in the top right of the window. +9. Check for the Account Activation email. Note, this may be collected in a Slack notifications channel for easy access. +10. Follow the given link to create the Admin user with username `admin` and a strong password. Be sure to save that + password somewhere secure. +11. Upload that password to AWS Parameter Store under `/snowflake/$ACCOUNT/users/admin/password`, where `ACCOUNT` is the + value given during the subscription process. This password will only be used to create a private key, and all other + authentication will be done with said key. Below is an example of how to do that with a + [chamber](https://github.com/segmentio/chamber) command: + ``` AWS_PROFILE=$NAMESPACE-$TENANT-gbl-sbx01-admin chamber write /snowflake/$ACCOUNT/users/admin/ admin $PASSWORD ``` -11. Finally, use atmos to deploy this component: + +11. Finally, use atmos to deploy this component: + ``` atmos terraform deploy snowflake/account --stack $TENANT-use2-sbx01 ``` - ## Usage **Stack Level**: Regional @@ -55,30 +75,32 @@ components: Service: snowflake ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 3.0 | -| [snowflake](#requirement\_snowflake) | ~> 0.25 | -| [tls](#requirement\_tls) | ~> 3.0 | +| [aws](#requirement\_aws) | >= 3.0 | +| [random](#requirement\_random) | >= 2.3 | +| [snowflake](#requirement\_snowflake) | >= 0.25 | +| [tls](#requirement\_tls) | >= 3.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 3.0 | -| [random](#provider\_random) | n/a | -| [snowflake](#provider\_snowflake) | ~> 0.25 | -| [tls](#provider\_tls) | ~> 3.0 | +| [aws](#provider\_aws) | >= 3.0 | +| [random](#provider\_random) | >= 2.3 | +| [snowflake](#provider\_snowflake) | >= 0.25 | +| [tls](#provider\_tls) | >= 3.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [account](#module\_account) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.1 | +| [account](#module\_account) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [introspection](#module\_introspection) | cloudposse/label/null | 0.25.0 | | [snowflake\_account](#module\_snowflake\_account) | cloudposse/label/null | 0.25.0 | @@ -115,8 +137,6 @@ components: | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [global\_environment\_name](#input\_global\_environment\_name) | Global environment name | `string` | `"gbl"` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -151,6 +171,6 @@ components: | [ssm\_path\_terraform\_user\_name](#output\_ssm\_path\_terraform\_user\_name) | The path to the SSM parameter for the Terraform user name. | | [ssm\_path\_terraform\_user\_private\_key](#output\_ssm\_path\_terraform\_user\_private\_key) | The path to the SSM parameter for the Terraform user private key. | - + [](https://cpco.io/component) diff --git a/modules/snowflake-account/default.auto.tfvars b/modules/snowflake-account/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/snowflake-account/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/snowflake-account/provider-snowflake.tf b/modules/snowflake-account/provider-snowflake.tf new file mode 100644 index 000000000..46eed29fa --- /dev/null +++ b/modules/snowflake-account/provider-snowflake.tf @@ -0,0 +1,12 @@ +data "aws_ssm_parameter" "snowflake_password" { + count = local.enabled ? 1 : 0 + name = local.ssm_path_admin_user_password + with_decryption = true +} + +provider "snowflake" { + account = var.snowflake_account + region = "${var.snowflake_account_region}.aws" # required to append ".aws" to region, see https://github.com/chanzuckerberg/terraform-provider-snowflake/issues/529 + username = local.admin_username + password = data.aws_ssm_parameter.snowflake_password[0].value +} diff --git a/modules/snowflake-account/providers.tf b/modules/snowflake-account/providers.tf index e611933c5..ef923e10a 100644 --- a/modules/snowflake-account/providers.tf +++ b/modules/snowflake-account/providers.tf @@ -1,11 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -14,28 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} - -data "aws_ssm_parameter" "snowflake_password" { - count = local.enabled ? 
1 : 0 - name = local.ssm_path_admin_user_password - with_decryption = true -} - -provider "snowflake" { - account = var.snowflake_account - region = "${var.snowflake_account_region}.aws" # required to append ".aws" to region, see https://github.com/chanzuckerberg/terraform-provider-snowflake/issues/529 - username = local.admin_username - password = data.aws_ssm_parameter.snowflake_password[0].value -} diff --git a/modules/snowflake-account/remote-state.tf b/modules/snowflake-account/remote-state.tf index a4a24923d..db5f163ef 100644 --- a/modules/snowflake-account/remote-state.tf +++ b/modules/snowflake-account/remote-state.tf @@ -1,6 +1,6 @@ module "account" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.1" + version = "1.5.0" component = "account" stage = var.root_account_stage_name diff --git a/modules/snowflake-account/versions.tf b/modules/snowflake-account/versions.tf index b4d6d2ffb..9d71862ba 100644 --- a/modules/snowflake-account/versions.tf +++ b/modules/snowflake-account/versions.tf @@ -4,15 +4,19 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 3.0" + version = ">= 3.0" } snowflake = { source = "chanzuckerberg/snowflake" - version = "~> 0.25" + version = ">= 0.25" } tls = { source = "hashicorp/tls" - version = "~> 3.0" + version = ">= 3.0" + } + random = { + source = "hashicorp/random" + version = ">= 2.3" } } } diff --git a/modules/snowflake-database/README.md b/modules/snowflake-database/README.md index a468c0c0e..a96776606 100644 --- a/modules/snowflake-database/README.md +++ b/modules/snowflake-database/README.md @@ -1,6 +1,15 @@ +--- +tags: + - component/snowflake-database + - layer/unassigned + - provider/aws + - provider/snowflake +--- + # Component: `snowflake-database` -All data in Snowflake is stored in database tables, logically structured as collections of columns and rows. This component will create and control a Snowflake database, schema, and set of tables. +All data in Snowflake is stored in database tables, logically structured as collections of columns and rows. This +component will create and control a Snowflake database, schema, and set of tables. 
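For illustration only (this block is not taken from the diff): since this change deletes `default.auto.tfvars` and moves the grant defaults into `variables.tf` (see the `snowflake-database` variables diff further below), a stack that needs different grants can still override them in its Atmos configuration. The component name and grant values in this sketch come from the diff itself; the surrounding structure is the usual stack boilerplate.

```yaml
components:
  terraform:
    snowflake-database:
      vars:
        # These values mirror the new variable defaults; adjust per stack as needed
        database_grants: ["MODIFY", "MONITOR", "USAGE"]
        schema_grants: ["MODIFY", "MONITOR", "USAGE", "CREATE TABLE", "CREATE VIEW"]
        table_grants: ["SELECT", "INSERT", "UPDATE", "DELETE", "TRUNCATE", "REFERENCES"]
        view_grants: ["SELECT", "REFERENCES"]
```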
## Usage @@ -39,21 +48,22 @@ components: select * from "example"; ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 3.0 | -| [snowflake](#requirement\_snowflake) | ~> 0.25 | +| [aws](#requirement\_aws) | >= 3.0 | +| [snowflake](#requirement\_snowflake) | >= 0.25 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 3.0 | -| [snowflake](#provider\_snowflake) | ~> 0.25 | +| [aws](#provider\_aws) | >= 3.0 | +| [snowflake](#provider\_snowflake) | >= 0.25 | ## Modules @@ -61,7 +71,7 @@ components: |------|--------|---------| | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | | [introspection](#module\_introspection) | cloudposse/label/null | 0.25.0 | -| [snowflake\_account](#module\_snowflake\_account) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.1 | +| [snowflake\_account](#module\_snowflake\_account) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [snowflake\_database](#module\_snowflake\_database) | cloudposse/label/null | 0.25.0 | | [snowflake\_label](#module\_snowflake\_label) | cloudposse/label/null | 0.25.0 | | [snowflake\_schema](#module\_snowflake\_schema) | cloudposse/label/null | 0.25.0 | @@ -94,14 +104,12 @@ components: | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [data\_retention\_time\_in\_days](#input\_data\_retention\_time\_in\_days) | Time in days to retain data in Snowflake databases, schemas, and tables by default. | `string` | `1` | no | | [database\_comment](#input\_database\_comment) | The comment to give to the provisioned database. | `string` | `"A database created for managing programmatically created Snowflake schemas and tables."` | no | -| [database\_grants](#input\_database\_grants) | A list of Grants to give to the database created with component. | `list(string)` | `[]` | no | +| [database\_grants](#input\_database\_grants) | A list of Grants to give to the database created with component. | `list(string)` |
[
"MODIFY",
"MONITOR",
"USAGE"
]
| no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -111,23 +119,24 @@ components: | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | | [required\_tags](#input\_required\_tags) | List of required tag names | `list(string)` | `[]` | no | -| [schema\_grants](#input\_schema\_grants) | A list of Grants to give to the schema created with component. | `list(string)` | `[]` | no | +| [schema\_grants](#input\_schema\_grants) | A list of Grants to give to the schema created with component. | `list(string)` |
[
"MODIFY",
"MONITOR",
"USAGE",
"CREATE TABLE",
"CREATE VIEW"
]
| no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | -| [table\_grants](#input\_table\_grants) | A list of Grants to give to the tables created with component. | `list(string)` | `[]` | no | +| [table\_grants](#input\_table\_grants) | A list of Grants to give to the tables created with component. | `list(string)` |
[
"SELECT",
"INSERT",
"UPDATE",
"DELETE",
"TRUNCATE",
"REFERENCES"
]
| no | | [tables](#input\_tables) | A map of tables to create for Snowflake. A schema and database will be assigned for this group of tables. | `map(any)` | `{}` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | -| [view\_grants](#input\_view\_grants) | A list of Grants to give to the views created with component. | `list(string)` | `[]` | no | +| [view\_grants](#input\_view\_grants) | A list of Grants to give to the views created with component. | `list(string)` |
[
"SELECT",
"REFERENCES"
]
| no | | [views](#input\_views) | A map of views to create for Snowflake. The same schema and database will be assigned as for tables. | `map(any)` | `{}` | no | ## Outputs No outputs. - + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/snowflake-database) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/snowflake-database) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/snowflake-database/default.auto.tfvars b/modules/snowflake-database/default.auto.tfvars deleted file mode 100644 index 5ab99c1dd..000000000 --- a/modules/snowflake-database/default.auto.tfvars +++ /dev/null @@ -1,8 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false - -database_grants = ["MODIFY", "MONITOR", "USAGE"] -schema_grants = ["MODIFY", "MONITOR", "USAGE", "CREATE TABLE", "CREATE VIEW"] -table_grants = ["SELECT", "INSERT", "UPDATE", "DELETE", "TRUNCATE", "REFERENCES"] -view_grants = ["SELECT", "REFERENCES"] diff --git a/modules/snowflake-database/main.tf b/modules/snowflake-database/main.tf index 5fd9e68e0..2e4b67a45 100644 --- a/modules/snowflake-database/main.tf +++ b/modules/snowflake-database/main.tf @@ -119,13 +119,13 @@ resource "snowflake_table" "tables" { snowflake_table_grant.grant ] - # Ignore changes to column type because of a known issue with the provider. + # Ignore changes to column type because of a known issue with the provider. # Terraform will show changes on plan for updating the type, even though there are not any changes. # https://github.com/chanzuckerberg/terraform-provider-snowflake/issues/494 # # Furthermore, Terraform doesn't support wildcards or variables in the lifecycle block, so either we can ignore all changes to `column` or need to list out all indices manually. # """ - # A single static variable reference is required: only attribute access and indexing with constant keys. + # A single static variable reference is required: only attribute access and indexing with constant keys. # No calculations, function calls, template expressions, etc are allowed here. # """ # https://github.com/hashicorp/terraform/issues/5666 diff --git a/modules/snowflake-database/provider-snowflake.tf b/modules/snowflake-database/provider-snowflake.tf new file mode 100644 index 000000000..777c558b6 --- /dev/null +++ b/modules/snowflake-database/provider-snowflake.tf @@ -0,0 +1,18 @@ +data "aws_ssm_parameter" "snowflake_username" { + count = local.enabled ? 1 : 0 + name = module.snowflake_account.outputs.ssm_path_terraform_user_name +} + +data "aws_ssm_parameter" "snowflake_private_key" { + count = local.enabled ? 
1 : 0 + name = module.snowflake_account.outputs.ssm_path_terraform_user_private_key + with_decryption = true +} + +provider "snowflake" { + account = local.snowflake_account + # required to append ".aws" to region, see https://github.com/chanzuckerberg/terraform-provider-snowflake/issues/529 + region = "${local.snowflake_account_region}.aws" + username = data.aws_ssm_parameter.snowflake_username[0].value + private_key = data.aws_ssm_parameter.snowflake_private_key[0].value +} diff --git a/modules/snowflake-database/providers.tf b/modules/snowflake-database/providers.tf index c0018f0a3..ef923e10a 100644 --- a/modules/snowflake-database/providers.tf +++ b/modules/snowflake-database/providers.tf @@ -1,11 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -14,34 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} - -data "aws_ssm_parameter" "snowflake_username" { - count = local.enabled ? 1 : 0 - name = module.snowflake_account.outputs.ssm_path_terraform_user_name -} - -data "aws_ssm_parameter" "snowflake_private_key" { - count = local.enabled ? 1 : 0 - name = module.snowflake_account.outputs.ssm_path_terraform_user_private_key - with_decryption = true -} - -provider "snowflake" { - account = local.snowflake_account - # required to append ".aws" to region, see https://github.com/chanzuckerberg/terraform-provider-snowflake/issues/529 - region = "${local.snowflake_account_region}.aws" - username = data.aws_ssm_parameter.snowflake_username[0].value - private_key = data.aws_ssm_parameter.snowflake_private_key[0].value -} diff --git a/modules/snowflake-database/remote-state.tf b/modules/snowflake-database/remote-state.tf index 13385a710..a39a2fa4a 100644 --- a/modules/snowflake-database/remote-state.tf +++ b/modules/snowflake-database/remote-state.tf @@ -1,6 +1,6 @@ module "snowflake_account" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.1" + version = "1.5.0" component = "snowflake-account" diff --git a/modules/snowflake-database/variables.tf b/modules/snowflake-database/variables.tf index cdd64d997..6d085cdce 100644 --- a/modules/snowflake-database/variables.tf +++ b/modules/snowflake-database/variables.tf @@ -24,25 +24,25 @@ variable "views" { variable "database_grants" { type = list(string) description = "A list of Grants to give to the database created with component." - default = [] + default = ["MODIFY", "MONITOR", "USAGE"] } variable "schema_grants" { type = list(string) description = "A list of Grants to give to the schema created with component." 
- default = [] + default = ["MODIFY", "MONITOR", "USAGE", "CREATE TABLE", "CREATE VIEW"] } variable "table_grants" { type = list(string) description = "A list of Grants to give to the tables created with component." - default = [] + default = ["SELECT", "INSERT", "UPDATE", "DELETE", "TRUNCATE", "REFERENCES"] } variable "view_grants" { type = list(string) description = "A list of Grants to give to the views created with component." - default = [] + default = ["SELECT", "REFERENCES"] } variable "database_comment" { diff --git a/modules/snowflake-database/versions.tf b/modules/snowflake-database/versions.tf index 0bdef00a2..480cd311b 100644 --- a/modules/snowflake-database/versions.tf +++ b/modules/snowflake-database/versions.tf @@ -4,11 +4,11 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 3.0" + version = ">= 3.0" } snowflake = { source = "chanzuckerberg/snowflake" - version = "~> 0.25" + version = ">= 0.25" } } } diff --git a/modules/sns-topic/README.md b/modules/sns-topic/README.md index 25b4c9744..97fdb4665 100644 --- a/modules/sns-topic/README.md +++ b/modules/sns-topic/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/sns-topic + - layer/addons + - provider/aws +--- + # Component: `sns-topic` This component is responsible for provisioning an SNS topic. @@ -40,16 +47,16 @@ components: fifo_queue_enabled: false content_based_deduplication: false redrive_policy_max_receiver_count: 5 - redrive_policy: null + redrive_policy: null ``` ```yaml import: -- catalog/sns-topic/defaults + - catalog/sns-topic/defaults components: terraform: - sns-topic-example: + sns-topic-example: metadata: component: sns-topic inherits: @@ -65,13 +72,14 @@ components: endpoint_auto_confirms: true ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | ## Providers @@ -108,8 +116,6 @@ No resources. | [fifo\_queue\_enabled](#input\_fifo\_queue\_enabled) | Whether or not to create a FIFO (first-in-first-out) queue | `bool` | `false` | no | | [fifo\_topic](#input\_fifo\_topic) | Whether or not to create a FIFO (first-in-first-out) topic | `bool` | `false` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kms\_master\_key\_id](#input\_kms\_master\_key\_id) | The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. | `string` | `"alias/aws/sns"` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | @@ -146,8 +152,11 @@ No resources. | [sns\_topic\_owner](#output\_sns\_topic\_owner) | SNS topic owner. | | [sns\_topic\_subscriptions](#output\_sns\_topic\_subscriptions) | SNS topic subscription. | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/sns-topic) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/sns-topic) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/sns-topic/default.auto.tfvars b/modules/sns-topic/default.auto.tfvars deleted file mode 100644 index ed6608db6..000000000 --- a/modules/sns-topic/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans -enabled = false - diff --git a/modules/sns-topic/providers.tf b/modules/sns-topic/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/sns-topic/providers.tf +++ b/modules/sns-topic/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/sns-topic/variables.tf b/modules/sns-topic/variables.tf index 1e5b21d9b..e10e3340f 100644 --- a/modules/sns-topic/variables.tf +++ b/modules/sns-topic/variables.tf @@ -61,7 +61,7 @@ variable "sns_topic_policy_json" { } # Enabling sqs_dlq_enabled won't be effective. -# SNS subscription - redrive policy parameter is not yet avaialable in TF - waiting for PR https://github.com/terraform-providers/terraform-provider-aws/issues/10931 +# SNS subscription - redrive policy parameter is not yet available in TF - waiting for PR https://github.com/terraform-providers/terraform-provider-aws/issues/10931 variable "sqs_dlq_enabled" { type = bool description = "Enable delivery of failed notifications to SQS and monitor messages in queue." 
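A hedged sketch (the component and input names come from the sns-topic README above; the values are hypothetical): with `import_profile_name` and `import_role_arn` removed from `providers.tf`, a stack now only sets the component's regular inputs, for example the FIFO options documented in the inputs table.

```yaml
components:
  terraform:
    sns-topic-example:
      metadata:
        component: sns-topic
      vars:
        fifo_topic: true
        fifo_queue_enabled: true
        content_based_deduplication: true
```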
diff --git a/modules/sns-topic/versions.tf b/modules/sns-topic/versions.tf
index e89eb16ed..f33ede77f 100644
--- a/modules/sns-topic/versions.tf
+++ b/modules/sns-topic/versions.tf
@@ -4,7 +4,7 @@ terraform {
   required_providers {
     aws = {
       source  = "hashicorp/aws"
-      version = "~> 4.0"
+      version = ">= 4.0"
     }
   }
 }
diff --git a/modules/spa-s3-cloudfront/CHANGELOG.md b/modules/spa-s3-cloudfront/CHANGELOG.md
new file mode 100644
index 000000000..c8a3fe753
--- /dev/null
+++ b/modules/spa-s3-cloudfront/CHANGELOG.md
@@ -0,0 +1,55 @@
+## Component PRs [#991](https://github.com/cloudposse/terraform-aws-components/pull/991) and [#995](https://github.com/cloudposse/terraform-aws-components/pull/995)
+
+### Drop `lambda_edge_redirect_404`
+
+These PRs remove the `lambda_edge_redirect_404` functionality because it leads to significant costs. Use the native
+CloudFront custom error response configuration instead:
+
+```yaml
+cloudfront_custom_error_response:
+  - error_code: 404
+    response_code: 404
+    response_page_path: /404.html
+```
+
+## Components PR [#978](https://github.com/cloudposse/terraform-aws-components/pull/978)
+
+### Lambda@Edge Submodule Refactor
+
+This PR has significantly refactored how Lambda@Edge functions are managed by Terraform with this component. Previously,
+the specific use cases for Lambda@Edge functions were handled by the submodules `lambda-edge-preview` and
+`lambda_edge_redirect_404`. These component submodules both called the same Terraform module,
+`cloudposse/cloudfront-s3-cdn/aws//modules/lambda@edge`. These submodules have been replaced with a single Terraform
+file, `lambda_edge.tf`.
+
+A single file is better than submodules because it (1) simplifies the component and (2) adds the ability to deep merge
+function configuration. CloudFront distributions support a single Lambda@Edge function for each origin/viewer request
+or response. With deep merging, we can define default values for function configuration and provide the ability to
+overwrite specific values for a given deployment.
+
+Specifically, in our own use case an authorization Lambda@Edge viewer request is attached only if the paywall is enabled.
+Other deployments use an alternative viewer request to redirect 404s.
+
+#### Upgrading with `preview_environment_enabled: true` or `lambda_edge_redirect_404_enabled: true`
+
+If you have `var.preview_environment_enabled` or `var.lambda_edge_redirect_404_enabled` set to `true`, Terraform `moved`
+blocks will move the resources previously created by the submodules to the new resources defined in `lambda_edge.tf`.
+Please give your next Terraform plan a sanity check. Any existing Lambda functions _should not be destroyed_ by this change.
+
+#### Upgrading with both `preview_environment_enabled: false` and `lambda_edge_redirect_404_enabled: false`
+
+If you have no Lambda@Edge functions deployed and both `var.preview_environment_enabled` and
+`var.lambda_edge_redirect_404_enabled` are `false` (the default values), no change is necessary.
+
+### Lambda Runtime Version
+
+The previous PR [#946](https://github.com/cloudposse/terraform-aws-components/pull/946) introduced the
+`var.lambda_runtime` input. Previously, the version of Node in both submodules was hard-coded to be `nodejs12.x`. This
+PR renames that variable to `var.lambda_edge_runtime` and sets the default to `nodejs16.x`.
+
+If you want to maintain the previous version of Node, set `var.lambda_edge_runtime` to `nodejs12.x`, though be aware
+that AWS deprecated that version on March 31, 2023, and lambdas using that environment may no longer work. 
Otherwise,
+this component will attempt to deploy the functions with runtime `nodejs16.x`.
+
+- [See all available runtimes here](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html#SSS-CreateFunction-request-Runtime)
+- [See runtime environment deprecation dates here](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html#runtime-support-policy)
diff --git a/modules/spa-s3-cloudfront/README.md b/modules/spa-s3-cloudfront/README.md
new file mode 100644
index 000000000..173f9a2b7
--- /dev/null
+++ b/modules/spa-s3-cloudfront/README.md
@@ -0,0 +1,296 @@
+---
+tags:
+  - component/spa-s3-cloudfront
+  - layer/addons
+  - provider/aws
+---
+
+# Component: `spa-s3-cloudfront`
+
+This component is responsible for provisioning:
+
+- S3 bucket
+- CloudFront distribution for a Single Page Application
+- ACM placed in us-east-1 regardless of the stack region (requirement of CloudFront)
+
+NOTE: The component does not use the ACM created by `dns-delegated`, because the ACM region has to be us-east-1.
+
+## Usage
+
+**Stack Level**: Regional
+
+Here are some example snippets showing how to use this component:
+
+An import for all instantiations of the `spa-s3-cloudfront` component can be created at `stacks/spa/spa-defaults.yaml`:
+
+```yaml
+components:
+  terraform:
+    spa-s3-cloudfront:
+      vars:
+        # lookup GitHub Runner IAM role via remote state
+        github_runners_deployment_principal_arn_enabled: true
+        github_runners_component_name: github-runners
+        github_runners_tenant_name: core
+        github_runners_environment_name: ue2
+        github_runners_stage_name: auto
+        origin_force_destroy: false
+        origin_versioning_enabled: true
+        origin_block_public_acls: true
+        origin_block_public_policy: true
+        origin_ignore_public_acls: true
+        origin_restrict_public_buckets: true
+        origin_encryption_enabled: true
+        cloudfront_index_document: index.html
+        cloudfront_ipv6_enabled: false
+        cloudfront_compress: true
+        cloudfront_default_root_object: index.html
+        cloudfront_viewer_protocol_policy: redirect-to-https
+```
+
+An import for all instantiations for a specific SPA can be created at `stacks/spa/example-spa.yaml`:
+
+```yaml
+components:
+  terraform:
+    example-spa:
+      component: spa-s3-cloudfront
+      vars:
+        name: example-spa
+        site_subdomain: example-spa
+        cloudfront_allowed_methods:
+          - GET
+          - HEAD
+        cloudfront_cached_methods:
+          - GET
+          - HEAD
+        cloudfront_custom_error_response:
+          - error_caching_min_ttl: 1
+            error_code: 403
+            response_code: 200
+            response_page_path: /index.html
+        cloudfront_default_ttl: 60
+        cloudfront_min_ttl: 60
+        cloudfront_max_ttl: 60
+```
+
+Finally, the `spa-s3-cloudfront` component can be instantiated in a stack config:
+
+```yaml
+import:
+  - spa/example-spa
+
+components:
+  terraform:
+    example-spa:
+      component: spa-s3-cloudfront
+      settings:
+        spacelift:
+          workspace_enabled: true
+      vars: {}
+```
+
+### Failover Origins
+
+Failover origins are supported via `var.failover_s3_origin_name` and `var.failover_s3_origin_region`.
+
+### Preview Environments
+
+SPA Preview environments (i.e. `subdomain.example.com` mapping to a `/subdomain` path in the S3 bucket) powered by
+Lambda@Edge are supported via `var.preview_environment_enabled`. See both the variable description and inline
+documentation for an extensive explanation of how these preview environments work.
+
+### Customizing Lambda@Edge
+
+This component supports customizing Lambda@Edge functions for the CloudFront distribution. 
All Lambda@Edge function +configuration is deep merged before being passed to the `cloudposse/cloudfront-s3-cdn/aws//modules/lambda@edge` module. +You can add additional functions and overwrite existing functions as such: + +```yaml +import: + - catalog/spa-s3-cloudfront/defaults + +components: + terraform: + refarch-docs-site-spa: + metadata: + component: spa-s3-cloudfront + inherits: + - spa-s3-cloudfront-defaults + vars: + enabled: true + lambda_edge_functions: + viewer_request: # overwrite existing function + source: null # this overwrites the 404 viewer request source with deep merging + source_zip: "./dist/lambda_edge_paywall_viewer_request.zip" + runtime: "nodejs16.x" + handler: "index.handler" + event_type: "viewer-request" + include_body: false + viewer_response: # new function + source_zip: "./dist/lambda_edge_paywall_viewer_response.zip" + runtime: "nodejs16.x" + handler: "index.handler" + event_type: "viewer-response" + include_body: false +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.9.0 | +| [aws.failover](#provider\_aws.failover) | >= 4.9.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [acm\_request\_certificate](#module\_acm\_request\_certificate) | cloudposse/acm-request-certificate/aws | 0.18.0 | +| [dns\_delegated](#module\_dns\_delegated) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [gha\_assume\_role](#module\_gha\_assume\_role) | ../account-map/modules/team-assume-role-policy | n/a | +| [gha\_role\_name](#module\_gha\_role\_name) | cloudposse/label/null | 0.25.0 | +| [github\_runners](#module\_github\_runners) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [lambda\_edge](#module\_lambda\_edge) | cloudposse/cloudfront-s3-cdn/aws//modules/lambda@edge | 0.92.0 | +| [lambda\_edge\_functions](#module\_lambda\_edge\_functions) | cloudposse/config/yaml//modules/deepmerge | 1.0.2 | +| [spa\_web](#module\_spa\_web) | cloudposse/cloudfront-s3-cdn/aws | 0.95.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [utils](#module\_utils) | cloudposse/utils/aws | 1.3.0 | +| [waf](#module\_waf) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_cloudfront_cache_policy.created_cache_policies](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudfront_cache_policy) | resource | +| [aws_cloudfront_origin_request_policy.created_origin_request_policies](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudfront_origin_request_policy) | resource | +| [aws_iam_policy.additional_lambda_edge_permission](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_role.github_actions](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.additional_lambda_edge_permission](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_shield_protection.shield_protection](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/shield_protection) | resource | +| 
[aws_iam_policy_document.additional_lambda_edge_permission](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.github_actions_iam_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_s3_bucket.failover_bucket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/s3_bucket) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [block\_origin\_public\_access\_enabled](#input\_block\_origin\_public\_access\_enabled) | When set to 'true' the s3 origin bucket will have public access block enabled. | `bool` | `true` | no | +| [cloudfront\_access\_log\_bucket\_name](#input\_cloudfront\_access\_log\_bucket\_name) | When `cloudfront_access_log_create_bucket` is `false`, this is the name of the existing S3 Bucket where
CloudFront Access Logs are to be delivered and is required. IGNORED when `cloudfront_access_log_create_bucket` is `true`. | `string` | `""` | no | +| [cloudfront\_access\_log\_bucket\_name\_rendering\_enabled](#input\_cloudfront\_access\_log\_bucket\_name\_rendering\_enabled) | If set to `true`, then the CloudFront origin access logs bucket name will be rendered by calling `format("%v-%v-%v-%v", var.namespace, var.environment, var.stage, var.cloudfront_access_log_bucket_name)`.
Otherwise, the value for `cloudfront_access_log_bucket_name` will need to be the globally unique name of the access logs bucket.

For example, if this component produces an origin bucket named `eg-ue1-devplatform-example` and `cloudfront_access_log_bucket_name` is set to
`example-cloudfront-access-logs`, then the bucket name will be rendered to be `eg-ue1-devplatform-example-cloudfront-access-logs`. | `bool` | `false` | no | +| [cloudfront\_access\_log\_create\_bucket](#input\_cloudfront\_access\_log\_create\_bucket) | When `true` and `cloudfront_access_logging_enabled` is also true, this module will create a new,
separate S3 bucket to receive CloudFront Access Logs. | `bool` | `true` | no | +| [cloudfront\_access\_log\_prefix](#input\_cloudfront\_access\_log\_prefix) | Prefix to use for CloudFront Access Log object keys. Defaults to no prefix. | `string` | `""` | no | +| [cloudfront\_access\_log\_prefix\_rendering\_enabled](#input\_cloudfront\_access\_log\_prefix\_rendering\_enabled) | Whether or not to dynamically render ${module.this.id} at the end of `var.cloudfront_access_log_prefix`. | `bool` | `false` | no | +| [cloudfront\_allowed\_methods](#input\_cloudfront\_allowed\_methods) | List of allowed methods (e.g. GET, PUT, POST, DELETE, HEAD) for AWS CloudFront. | `list(string)` |
[
"DELETE",
"GET",
"HEAD",
"OPTIONS",
"PATCH",
"POST",
"PUT"
]
| no | +| [cloudfront\_aws\_shield\_protection\_enabled](#input\_cloudfront\_aws\_shield\_protection\_enabled) | Enable or disable AWS Shield Advanced protection for the CloudFront distribution. If set to 'true', a subscription to AWS Shield Advanced must exist in this account. | `bool` | `false` | no | +| [cloudfront\_aws\_waf\_component\_name](#input\_cloudfront\_aws\_waf\_component\_name) | The name of the component used when deploying WAF ACL | `string` | `"waf"` | no | +| [cloudfront\_aws\_waf\_environment](#input\_cloudfront\_aws\_waf\_environment) | The environment where the WAF ACL for CloudFront distribution exists. | `string` | `null` | no | +| [cloudfront\_aws\_waf\_protection\_enabled](#input\_cloudfront\_aws\_waf\_protection\_enabled) | Enable or disable AWS WAF for the CloudFront distribution.

This assumes that the `aws-waf-acl-default-cloudfront` component has been deployed to the regional stack corresponding
to `var.waf_acl_environment`. | `bool` | `true` | no | +| [cloudfront\_cached\_methods](#input\_cloudfront\_cached\_methods) | List of cached methods (e.g. GET, PUT, POST, DELETE, HEAD). | `list(string)` |
[
"GET",
"HEAD"
]
| no | +| [cloudfront\_compress](#input\_cloudfront\_compress) | Compress content for web requests that include Accept-Encoding: gzip in the request header. | `bool` | `false` | no | +| [cloudfront\_custom\_error\_response](#input\_cloudfront\_custom\_error\_response) | List of one or more custom error response element maps. |
list(object({
error_caching_min_ttl = optional(string, "10")
error_code = string
response_code = string
response_page_path = string
}))
| `[]` | no | +| [cloudfront\_default\_root\_object](#input\_cloudfront\_default\_root\_object) | Object that CloudFront return when requests the root URL. | `string` | `"index.html"` | no | +| [cloudfront\_default\_ttl](#input\_cloudfront\_default\_ttl) | Default amount of time (in seconds) that an object is in a CloudFront cache. | `number` | `60` | no | +| [cloudfront\_index\_document](#input\_cloudfront\_index\_document) | Amazon S3 returns this index document when requests are made to the root domain or any of the subfolders. | `string` | `"index.html"` | no | +| [cloudfront\_ipv6\_enabled](#input\_cloudfront\_ipv6\_enabled) | Set to true to enable an AAAA DNS record to be set as well as the A record. | `bool` | `true` | no | +| [cloudfront\_lambda\_function\_association](#input\_cloudfront\_lambda\_function\_association) | A config block that configures the CloudFront distribution with lambda@edge functions for specific events. |
list(object({
event_type = string
include_body = bool
lambda_arn = string
}))
| `[]` | no | +| [cloudfront\_max\_ttl](#input\_cloudfront\_max\_ttl) | Maximum amount of time (in seconds) that an object is in a CloudFront cache. | `number` | `31536000` | no | +| [cloudfront\_min\_ttl](#input\_cloudfront\_min\_ttl) | Minimum amount of time that you want objects to stay in CloudFront caches. | `number` | `0` | no | +| [cloudfront\_viewer\_protocol\_policy](#input\_cloudfront\_viewer\_protocol\_policy) | Limit the protocol users can use to access content. One of `allow-all`, `https-only`, or `redirect-to-https`. | `string` | `"redirect-to-https"` | no | +| [comment](#input\_comment) | Any comments you want to include about the distribution. | `string` | `"Managed by Terraform"` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [custom\_origins](#input\_custom\_origins) | A list of additional custom website [origins](https://www.terraform.io/docs/providers/aws/r/cloudfront_distribution.html#origin-arguments) for this distribution. |
list(object({
domain_name = string
origin_id = string
origin_path = string
custom_headers = list(object({
name = string
value = string
}))
custom_origin_config = object({
http_port = number
https_port = number
origin_protocol_policy = string
origin_ssl_protocols = list(string)
origin_keepalive_timeout = number
origin_read_timeout = number
})
}))
| `[]` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [dns\_delegated\_environment\_name](#input\_dns\_delegated\_environment\_name) | The environment where `dns-delegated` component is deployed to | `string` | `"gbl"` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [external\_aliases](#input\_external\_aliases) | List of FQDN's - Used to set the Alternate Domain Names (CNAMEs) setting on CloudFront. No new Route53 records will be created for these.

Setting `process_domain_validation_options` to true may cause the component to fail if an external\_alias DNS zone is not controlled by Terraform.

Setting `preview_environment_enabled` to `true` will cause this variable to be ignored. | `list(string)` | `[]` | no | +| [failover\_criteria\_status\_codes](#input\_failover\_criteria\_status\_codes) | List of HTTP Status Codes to use as the origin group failover criteria. | `list(string)` |
[
403,
404,
500,
502
]
| no | +| [failover\_s3\_origin\_environment](#input\_failover\_s3\_origin\_environment) | The [fixed name](https://github.com/cloudposse/terraform-aws-utils/blob/399951e552483a4f4c1dc7fbe2675c443f3dbd83/main.tf#L10) of the AWS Region where the
failover S3 origin exists. Setting this variable will enable use of a failover S3 origin, but it is required for the
failover S3 origin to exist beforehand. This variable is used in conjunction with `var.failover_s3_origin_format` to
build out the name of the Failover S3 origin in the specified region.

For example, if this component creates an origin of name `eg-ue1-devplatform-example` and this variable is set to `uw1`,
then it is expected that a bucket with the name `eg-uw1-devplatform-example-failover` exists in `us-west-1`. | `string` | `null` | no | +| [failover\_s3\_origin\_format](#input\_failover\_s3\_origin\_format) | If `var.failover_s3_origin_environment` is supplied, this is the format to use for the failover S3 origin bucket name when
building the name via `format([format], var.namespace, var.failover_s3_origin_environment, var.stage, var.name)`
and then looking it up via the `aws_s3_bucket` Data Source.

For example, if this component creates an origin of name `eg-ue1-devplatform-example` and `var.failover_s3_origin_environment`
is set to `uw1`, then it is expected that a bucket with the name `eg-uw1-devplatform-example-failover` exists in `us-west-1`. | `string` | `"%v-%v-%v-%v-failover"` | no | +| [forward\_cookies](#input\_forward\_cookies) | Specifies whether you want CloudFront to forward all or no cookies to the origin. Can be 'all' or 'none' | `string` | `"none"` | no | +| [forward\_header\_values](#input\_forward\_header\_values) | A list of whitelisted header values to forward to the origin (incompatible with `cache_policy_id`) | `list(string)` |
[
"Access-Control-Request-Headers",
"Access-Control-Request-Method",
"Origin"
]
| no | +| [github\_actions\_allowed\_repos](#input\_github\_actions\_allowed\_repos) | A list of the GitHub repositories that are allowed to assume this role from GitHub Actions. For example,
["cloudposse/infra-live"]. Can contain "*" as wildcard.
If org part of repo name is omitted, "cloudposse" will be assumed. | `list(string)` | `[]` | no | +| [github\_actions\_iam\_role\_attributes](#input\_github\_actions\_iam\_role\_attributes) | Additional attributes to add to the role name | `list(string)` | `[]` | no | +| [github\_actions\_iam\_role\_enabled](#input\_github\_actions\_iam\_role\_enabled) | Flag to toggle creation of an IAM Role that GitHub Actions can assume to access AWS resources | `bool` | `false` | no | +| [github\_runners\_component\_name](#input\_github\_runners\_component\_name) | The name of the component that deploys GitHub Runners, used in remote-state lookup | `string` | `"github-runners"` | no | +| [github\_runners\_deployment\_principal\_arn\_enabled](#input\_github\_runners\_deployment\_principal\_arn\_enabled) | A flag that is used to decide whether or not to include the GitHub Runner's IAM role in origin\_deployment\_principal\_arns list | `bool` | `true` | no | +| [github\_runners\_environment\_name](#input\_github\_runners\_environment\_name) | The name of the environment where the GitHub Runners are provisioned | `string` | `"ue2"` | no | +| [github\_runners\_stage\_name](#input\_github\_runners\_stage\_name) | The stage name where the GitHub Runners are provisioned | `string` | `"auto"` | no | +| [github\_runners\_tenant\_name](#input\_github\_runners\_tenant\_name) | The tenant name where the GitHub Runners are provisioned | `string` | `null` | no | +| [http\_version](#input\_http\_version) | The maximum HTTP version to support on the distribution. Allowed values are http1.1, http2, http2and3 and http3 | `string` | `"http2"` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [lambda\_edge\_allowed\_ssm\_parameters](#input\_lambda\_edge\_allowed\_ssm\_parameters) | The Lambda@Edge functions will be allowed to access the AWS SSM parameters with these ARNs | `list(string)` | `[]` | no | +| [lambda\_edge\_destruction\_delay](#input\_lambda\_edge\_destruction\_delay) | The delay, in [Golang ParseDuration](https://pkg.go.dev/time#ParseDuration) format, to wait before destroying the Lambda@Edge
functions.

This delay is meant to circumvent Lambda@Edge functions not being immediately deletable following their dissociation from
a CloudFront distribution, since they are replicated to CloudFront Edge servers around the world.

If set to `null`, no delay will be introduced.

By default, the delay is 20 minutes. This is because it takes about 3 minutes to destroy a CloudFront distribution, and
around 15 minutes until the Lambda@Edge function is available for deletion, in most cases.

For more information, see: https://github.com/hashicorp/terraform-provider-aws/issues/1721. | `string` | `"20m"` | no | +| [lambda\_edge\_functions](#input\_lambda\_edge\_functions) | Lambda@Edge functions to create.

The key of this map is the name of the Lambda@Edge function.

This map will be deep merged with each enabled default function. Use deep merge to change or overwrite specific values passed by those function objects. |
map(object({
source = optional(list(object({
filename = string
content = string
})))
source_dir = optional(string)
source_zip = optional(string)
runtime = string
handler = string
event_type = string
include_body = bool
}))
| `{}` | no | +| [lambda\_edge\_handler](#input\_lambda\_edge\_handler) | The default Lambda@Edge handler for all functions.

This value is deep merged in `module.lambda_edge_functions` with `var.lambda_edge_functions` and can be overwritten for any individual function. | `string` | `"index.handler"` | no | +| [lambda\_edge\_runtime](#input\_lambda\_edge\_runtime) | The default Lambda@Edge runtime for all functions.

This value is deep merged in `module.lambda_edge_functions` with `var.lambda_edge_functions` and can be overwritten for any individual function. | `string` | `"nodejs16.x"` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [ordered\_cache](#input\_ordered\_cache) | An ordered list of [cache behaviors](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudfront_distribution#cache-behavior-arguments) resource for this distribution.
List in order of precedence (first match wins). This is in addition to the default cache policy.
Set `target_origin_id` to `""` to specify the S3 bucket origin created by this module.
Set `cache_policy_id` to `""` to use `cache_policy_name` for creating a new policy. At least one of the two must be set.
Set `origin_request_policy_id` to `""` to use `origin_request_policy_name` for creating a new policy. At least one of the two must be set. |
list(object({
target_origin_id = string
path_pattern = string

allowed_methods = list(string)
cached_methods = list(string)
compress = bool
trusted_signers = list(string)
trusted_key_groups = list(string)

cache_policy_name = optional(string)
cache_policy_id = optional(string)
origin_request_policy_name = optional(string)
origin_request_policy_id = optional(string)

viewer_protocol_policy = string
min_ttl = number
default_ttl = number
max_ttl = number
response_headers_policy_id = string

forward_query_string = bool
forward_header_values = list(string)
forward_cookies = string
forward_cookies_whitelisted_names = list(string)

lambda_function_association = list(object({
event_type = string
include_body = bool
lambda_arn = string
}))

function_association = list(object({
event_type = string
function_arn = string
}))
}))
| `[]` | no | +| [origin\_allow\_ssl\_requests\_only](#input\_origin\_allow\_ssl\_requests\_only) | Set to `true` in order to have the origin bucket require requests to use Secure Socket Layer (HTTPS/SSL). This will explicitly deny access to HTTP requests | `bool` | `true` | no | +| [origin\_bucket](#input\_origin\_bucket) | Name of an existing S3 bucket to use as the origin. If this is not provided, this component will create a new s3 bucket using `var.name` and other context related inputs | `string` | `null` | no | +| [origin\_deployment\_actions](#input\_origin\_deployment\_actions) | List of actions to permit `origin_deployment_principal_arns` to perform on bucket and bucket prefixes (see `origin_deployment_principal_arns`) | `list(string)` |
[
"s3:PutObject",
"s3:PutObjectAcl",
"s3:GetObject",
"s3:DeleteObject",
"s3:ListBucket",
"s3:ListBucketMultipartUploads",
"s3:GetBucketLocation",
"s3:AbortMultipartUpload"
]
| no | +| [origin\_deployment\_principal\_arns](#input\_origin\_deployment\_principal\_arns) | List of role ARNs to grant deployment permissions to the origin Bucket. | `list(string)` | `[]` | no | +| [origin\_encryption\_enabled](#input\_origin\_encryption\_enabled) | When set to 'true' the origin Bucket will have aes256 encryption enabled by default. | `bool` | `true` | no | +| [origin\_force\_destroy](#input\_origin\_force\_destroy) | A boolean string that indicates all objects should be deleted from the origin Bucket so that the Bucket can be destroyed without error. These objects are not recoverable. | `bool` | `false` | no | +| [origin\_s3\_access\_log\_bucket\_name](#input\_origin\_s3\_access\_log\_bucket\_name) | Name of the existing S3 bucket where S3 Access Logs for the origin Bucket will be delivered. Default is not to enable S3 Access Logging for the origin Bucket. | `string` | `""` | no | +| [origin\_s3\_access\_log\_bucket\_name\_rendering\_enabled](#input\_origin\_s3\_access\_log\_bucket\_name\_rendering\_enabled) | If set to `true`, then the S3 origin access logs bucket name will be rendered by calling `format("%v-%v-%v-%v", var.namespace, var.environment, var.stage, var.origin_s3_access_log_bucket_name)`.
Otherwise, the value for `origin_s3_access_log_bucket_name` will need to be the globally unique name of the access logs bucket.

For example, if this component produces an origin bucket named `eg-ue1-devplatform-example` and `origin_s3_access_log_bucket_name` is set to
`example-s3-access-logs`, then the bucket name will be rendered to be `eg-ue1-devplatform-example-s3-access-logs`. | `bool` | `false` | no | +| [origin\_s3\_access\_log\_prefix](#input\_origin\_s3\_access\_log\_prefix) | Prefix to use for S3 Access Log object keys. Defaults to `logs/${module.this.id}` | `string` | `""` | no | +| [origin\_s3\_access\_logging\_enabled](#input\_origin\_s3\_access\_logging\_enabled) | Set `true` to deliver S3 Access Logs to the `origin_s3_access_log_bucket_name` bucket.
Defaults to `false` if `origin_s3_access_log_bucket_name` is empty (the default), `true` otherwise.
Must be set explicitly if the access log bucket is being created at the same time as this module is being invoked. | `bool` | `null` | no | +| [origin\_versioning\_enabled](#input\_origin\_versioning\_enabled) | Enable or disable versioning for the origin Bucket. Versioning is a means of keeping multiple variants of an object in the same bucket. | `bool` | `false` | no | +| [parent\_zone\_name](#input\_parent\_zone\_name) | Parent domain name of site to publish. Defaults to format(parent\_zone\_name\_pattern, stage, environment). | `string` | `""` | no | +| [preview\_environment\_enabled](#input\_preview\_environment\_enabled) | Enable or disable SPA Preview Environments via Lambda@Edge, i.e. mapping `subdomain.example.com` to the `/subdomain`
path in the origin S3 bucket.

This variable implicitly affects the following variables:

* `s3_website_enabled`
* `s3_website_password_enabled`
* `block_origin_public_access_enabled`
* `origin_allow_ssl_requests_only`
* `forward_header_values`
* `cloudfront_default_ttl`
* `cloudfront_min_ttl`
* `cloudfront_max_ttl`
* `cloudfront_lambda_function_association` | `bool` | `false` | no | +| [process\_domain\_validation\_options](#input\_process\_domain\_validation\_options) | Flag to enable/disable processing of the record to add to the DNS zone to complete certificate validation | `bool` | `true` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region. | `string` | n/a | yes | +| [s3\_object\_ownership](#input\_s3\_object\_ownership) | Specifies the S3 object ownership control on the origin bucket. Valid values are `ObjectWriter`, `BucketOwnerPreferred`, and 'BucketOwnerEnforced'. | `string` | `"ObjectWriter"` | no | +| [s3\_origins](#input\_s3\_origins) | A list of S3 [origins](https://www.terraform.io/docs/providers/aws/r/cloudfront_distribution.html#origin-arguments) (in addition to the one created by this component) for this distribution.
S3 buckets configured as websites are `custom_origins`, not `s3_origins`.
Specifying `s3_origin_config.origin_access_identity` as `null` or `""` will have it translated to the `origin_access_identity` used by the origin created by this component. |
list(object({
domain_name = string
origin_id = string
origin_path = string
s3_origin_config = object({
origin_access_identity = string
})
}))
| `[]` | no | +| [s3\_website\_enabled](#input\_s3\_website\_enabled) | Set to true to enable the created S3 bucket to serve as a website independently of CloudFront,
and to use that website as the origin.

Setting `preview_environment_enabled` will implicitly set this to `true`. | `bool` | `false` | no | +| [s3\_website\_password\_enabled](#input\_s3\_website\_password\_enabled) | If set to true, and `s3_website_enabled` is also true, a password will be required in the `Referrer` field of the
HTTP request in order to access the website, and CloudFront will be configured to pass this password in its requests.
This will make it much harder for people to bypass CloudFront and access the S3 website directly via its website endpoint. | `bool` | `false` | no | +| [site\_fqdn](#input\_site\_fqdn) | Fully qualified domain name of site to publish. Overrides site\_subdomain and parent\_zone\_name. | `string` | `""` | no | +| [site\_subdomain](#input\_site\_subdomain) | Subdomain to plug into site\_name\_pattern to make site FQDN. | `string` | `""` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [cloudfront\_distribution\_alias](#output\_cloudfront\_distribution\_alias) | Cloudfront Distribution Alias Record. | +| [cloudfront\_distribution\_domain\_name](#output\_cloudfront\_distribution\_domain\_name) | Cloudfront Distribution Domain Name. | +| [cloudfront\_distribution\_identity\_arn](#output\_cloudfront\_distribution\_identity\_arn) | CloudFront Distribution Origin Access Identity IAM ARN. | +| [failover\_s3\_bucket\_name](#output\_failover\_s3\_bucket\_name) | Failover Origin bucket name, if enabled. | +| [github\_actions\_iam\_role\_arn](#output\_github\_actions\_iam\_role\_arn) | ARN of IAM role for GitHub Actions | +| [github\_actions\_iam\_role\_name](#output\_github\_actions\_iam\_role\_name) | Name of IAM role for GitHub Actions | +| [origin\_s3\_bucket\_arn](#output\_origin\_s3\_bucket\_arn) | Origin bucket ARN. | +| [origin\_s3\_bucket\_name](#output\_origin\_s3\_bucket\_name) | Origin bucket name. | + + + +## References + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/spa-s3-cloudfront) - + Cloud Posse's upstream component +- [How do I use CloudFront to serve a static website hosted on Amazon S3?](https://aws.amazon.com/premiumsupport/knowledge-center/cloudfront-serve-static-website/) + +[](https://cpco.io/component) diff --git a/modules/spa-s3-cloudfront/context.tf b/modules/spa-s3-cloudfront/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/spa-s3-cloudfront/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/spa-s3-cloudfront/failover.tf b/modules/spa-s3-cloudfront/failover.tf new file mode 100644 index 000000000..51b84205c --- /dev/null +++ b/modules/spa-s3-cloudfront/failover.tf @@ -0,0 +1,18 @@ +locals { + failover_enabled = local.enabled && try(length(var.failover_s3_origin_environment), 0) > 0 + failover_region = local.failover_enabled ? module.utils.region_az_alt_code_maps.from_fixed[var.failover_s3_origin_environment] : var.region + failover_bucket = local.failover_enabled ? format(var.failover_s3_origin_format, var.namespace, var.failover_s3_origin_environment, var.stage, var.name) : null +} + +module "utils" { + source = "cloudposse/utils/aws" + version = "1.3.0" +} + +data "aws_s3_bucket" "failover_bucket" { + count = local.failover_enabled ? 
1 : 0 + + bucket = local.failover_bucket + + provider = aws.failover +} diff --git a/modules/spa-s3-cloudfront/github-actions-iam-policy.tf b/modules/spa-s3-cloudfront/github-actions-iam-policy.tf new file mode 100644 index 000000000..79ceec702 --- /dev/null +++ b/modules/spa-s3-cloudfront/github-actions-iam-policy.tf @@ -0,0 +1,40 @@ +locals { + github_actions_iam_policy = data.aws_iam_policy_document.github_actions_iam_policy.json +} + +data "aws_iam_policy_document" "github_actions_iam_policy" { + statement { + sid = "BucketActions" + effect = "Allow" + actions = [ + "s3:ListBucket", + "s3:ListBucketMultipartUploads", + "s3:AbortMultipartUpload" + ] + resources = [module.spa_web.s3_bucket_arn] + } + + statement { + sid = "ObjectActions" + effect = "Allow" + actions = [ + "s3:GetObject", + "s3:DeleteObject", + "s3:PutObject", + "s3:PutObjectTagging", + "s3:PutObjectAcl" + ] + resources = [format("%s/*", module.spa_web.s3_bucket_arn)] + } + + statement { + sid = "CloudfrontActions" + effect = "Allow" + actions = [ + "cloudfront:CreateInvalidation" + ] + resources = [ + module.spa_web.cf_arn + ] + } +} diff --git a/modules/spa-s3-cloudfront/github-actions-iam-role.mixin.tf b/modules/spa-s3-cloudfront/github-actions-iam-role.mixin.tf new file mode 100644 index 000000000..de68c6602 --- /dev/null +++ b/modules/spa-s3-cloudfront/github-actions-iam-role.mixin.tf @@ -0,0 +1,72 @@ +# This mixin requires that a local variable named `github_actions_iam_policy` be defined +# and its value to be a JSON IAM Policy Document defining the permissions for the role. +# It also requires that the `github-oidc-provider` has been previously installed and the +# `github-assume-role-policy.mixin.tf` has been added to `account-map/modules/team-assume-role-policy`. + +variable "github_actions_iam_role_enabled" { + type = bool + description = <<-EOF + Flag to toggle creation of an IAM Role that GitHub Actions can assume to access AWS resources + EOF + default = false +} + +variable "github_actions_allowed_repos" { + type = list(string) + description = < 0 +} + +module "gha_role_name" { + source = "cloudposse/label/null" + version = "0.25.0" + + enabled = local.github_actions_iam_role_enabled + attributes = compact(concat(var.github_actions_iam_role_attributes, ["gha"])) + + context = module.this.context +} + +module "gha_assume_role" { + source = "../account-map/modules/team-assume-role-policy" + + trusted_github_repos = var.github_actions_allowed_repos + + context = module.gha_role_name.context +} + +resource "aws_iam_role" "github_actions" { + count = local.github_actions_iam_role_enabled ? 
1 : 0 + name = module.gha_role_name.id + assume_role_policy = module.gha_assume_role.github_assume_role_policy + + inline_policy { + name = module.gha_role_name.id + policy = local.github_actions_iam_policy + } +} + +output "github_actions_iam_role_arn" { + value = one(aws_iam_role.github_actions[*].arn) + description = "ARN of IAM role for GitHub Actions" +} + +output "github_actions_iam_role_name" { + value = one(aws_iam_role.github_actions[*].name) + description = "Name of IAM role for GitHub Actions" +} diff --git a/modules/spa-s3-cloudfront/lambda_edge.tf b/modules/spa-s3-cloudfront/lambda_edge.tf new file mode 100644 index 000000000..b3043b7fc --- /dev/null +++ b/modules/spa-s3-cloudfront/lambda_edge.tf @@ -0,0 +1,128 @@ +locals { + cloudfront_lambda_function_association = concat(var.cloudfront_lambda_function_association, module.lambda_edge.lambda_function_association) +} + +# See CHANGELOG for PR #978: +# https://github.com/cloudposse/terraform-aws-components/pull/978 +# +# Lambda@Edge was moved from submodules to this file +moved { + from = module.lambda-edge-preview.module.lambda_edge.aws_lambda_function.default["origin_request"] + to = module.lambda_edge.aws_lambda_function.default["origin_request"] +} +moved { + from = module.lambda_edge_redirect_404.module.lambda_edge.aws_lambda_function.default["origin_response"] + to = module.lambda_edge.aws_lambda_function.default["origin_response"] +} +moved { + from = module.lambda_edge_redirect_404.module.lambda_edge.aws_lambda_function.default["viewer_request"] + to = module.lambda_edge.aws_lambda_function.default["viewer_request"] +} + +module "lambda_edge_functions" { + source = "cloudposse/config/yaml//modules/deepmerge" + version = "1.0.2" + + count = local.enabled ? 1 : 0 + + maps = [ + local.preview_environment_enabled ? { + origin_request = { + source = [{ + content = <<-EOT + exports.handler = (event, context, callback) => { + const site_fqdn = "${local.site_fqdn}"; + + const { request } = event.Records[0].cf; + const default_prefix = ""; + + console.log('request:' + JSON.stringify(request)); + + let host = null; + + if (request.headers.hasOwnProperty('x-forwarded-host')) { + host = request.headers['x-forwarded-host'][0].value; + } else { + host = site_fqdn; + } + if (host == site_fqdn) { + request.origin.custom.path = default_prefix; // use default prefix if there is no subdomain + } else { + const subdomain = host.replace('.' + site_fqdn, ''); + request.origin.custom.path = `/$${subdomain}`; // use preview prefix + } + + return callback(null, request); + }; + EOT + filename = "index.js" + }] + runtime = var.lambda_edge_runtime + handler = var.lambda_edge_handler + event_type = "origin-request" + include_body = false + }, + viewer_request = { + source = [{ + content = <<-EOT + exports.handler = (event, context, callback) => { + const { request } = event.Records[0].cf; + if ('host' in request.headers) { + request.headers['x-forwarded-host'] = [ + { + key: 'X-Forwarded-Host', + value: request.headers.host[0].value + } + ]; + } + return callback(null, request); + }; + EOT + filename = "index.js" + }] + runtime = var.lambda_edge_runtime + handler = var.lambda_edge_handler + event_type = "viewer-request" + include_body = false + } + } : {}, + var.lambda_edge_functions, + ] +} + +module "lambda_edge" { + source = "cloudposse/cloudfront-s3-cdn/aws//modules/lambda@edge" + version = "0.92.0" + + functions = jsondecode(local.enabled ? 
jsonencode(module.lambda_edge_functions[0].merged) : jsonencode({})) + destruction_delay = var.lambda_edge_destruction_delay + + providers = { + aws = aws.us-east-1 + } + + context = module.this.context +} + +data "aws_iam_policy_document" "additional_lambda_edge_permission" { + count = local.enabled && (length(var.lambda_edge_allowed_ssm_parameters) > 0) ? 1 : 0 + statement { + effect = "Allow" + actions = ["ssm:GetParameter*"] + resources = var.lambda_edge_allowed_ssm_parameters + } +} + +resource "aws_iam_policy" "additional_lambda_edge_permission" { + count = local.enabled && (length(var.lambda_edge_allowed_ssm_parameters) > 0) ? 1 : 0 + + name = "${module.this.id}-read-ssm-vars" + policy = data.aws_iam_policy_document.additional_lambda_edge_permission[0].json +} + +resource "aws_iam_role_policy_attachment" "additional_lambda_edge_permission" { + for_each = local.enabled && (length(var.lambda_edge_allowed_ssm_parameters) > 0) ? toset(keys(module.lambda_edge_functions[0].merged)) : toset([]) + + policy_arn = aws_iam_policy.additional_lambda_edge_permission[0].arn + role = split("/", module.lambda_edge.lambda_functions[each.key].role_arn)[1] +} diff --git a/modules/spa-s3-cloudfront/main.tf b/modules/spa-s3-cloudfront/main.tf new file mode 100644 index 000000000..cb4faf960 --- /dev/null +++ b/modules/spa-s3-cloudfront/main.tf @@ -0,0 +1,150 @@ +locals { + enabled = module.this.enabled + aws_shield_enabled = local.enabled && var.cloudfront_aws_shield_protection_enabled + aws_waf_enabled = local.enabled && var.cloudfront_aws_waf_protection_enabled + github_runners_enabled = local.enabled && var.github_runners_deployment_principal_arn_enabled + parent_zone_name = length(var.parent_zone_name) > 0 ? var.parent_zone_name : try(module.dns_delegated.outputs.default_domain_name, null) + site_fqdn = length(var.site_fqdn) > 0 ? var.site_fqdn : format("%v.%v.%v", var.site_subdomain, module.this.environment, local.parent_zone_name) + s3_access_log_bucket_name = var.origin_s3_access_log_bucket_name_rendering_enabled ? format("%[1]v-${module.this.tenant != null ? "%[2]v-" : ""}%[3]v-%[4]v-%[5]v", var.namespace, var.tenant, var.environment, var.stage, var.origin_s3_access_log_bucket_name) : var.origin_s3_access_log_bucket_name + cloudfront_access_log_bucket_name = var.cloudfront_access_log_bucket_name_rendering_enabled ? format("%[1]v-${module.this.tenant != null ? "%[2]v-" : ""}%[3]v-%[4]v-%[5]v", var.namespace, var.tenant, var.environment, var.stage, var.cloudfront_access_log_bucket_name) : var.cloudfront_access_log_bucket_name + cloudfront_access_log_prefix = var.cloudfront_access_log_prefix_rendering_enabled ? "${var.cloudfront_access_log_prefix}${module.this.id}" : var.cloudfront_access_log_prefix + origin_deployment_principal_arns = local.github_runners_enabled ? concat(var.origin_deployment_principal_arns, [module.github_runners.outputs.iam_role_arn]) : var.origin_deployment_principal_arns + + # Variables affected by SPA Preview Environments + # + # In order for preview environments to work, there are some specific CloudFront Distribution settings that need to be in place (in order of local variables set below this list): + # 1. A wildcard domain Route53 alias needs to be created for the CloudFront distribution. SANs for the ACM certificate need to be set accordingly. + # 2. The origin must be a custom origin pointing to the S3 website endpoint, not a S3 REST origin (the set of Lambda@Edge functions in lambda_edge.tf do not support the latter). + # 3. 
Because of #2, the bucket in question cannot have a Public Access Block configuration blocking all public ACLs. + # 4. Because of #2 and #3, it is best practice to enable a password on the S3 website origin so that CloudFront is the single point of entry. + # 5. Object ACLs should be disabled for the origin bucket in the preview environment, otherwise CI/CD jobs uploading to the origin bucket may create object ACLs preventing the content from being served. + # 6. The statement in the bucket policy blocking non-TLS requests from CloudFront needs to be disabled. + # 7. A custom header 'x-forwarded-host' needs to be forwarded to the origin (it is injected by lambda@edge function associated with the Viewer Request event). + # 8. TTL values will be set to 0, because the preview environment is associated with development and debugging, not long term caching. + # 9. The Lambda@Edge functions created by lambda_edge.tf need to be associated with the CloudFront Distribution. + # + # This isn't necessarily the only way to get preview environments to work, but these are the constraints required to achieve the currently tested implementation in modules/lambda-edge-preview. + preview_environment_enabled = local.enabled && var.preview_environment_enabled + preview_environment_wildcard_domain = format("%v.%v", "*", local.site_fqdn) + aliases = concat([local.site_fqdn], local.preview_environment_enabled ? [local.preview_environment_wildcard_domain] : []) + external_aliases = local.preview_environment_enabled ? [] : var.external_aliases + subject_alternative_names = local.preview_environment_enabled ? [local.preview_environment_wildcard_domain] : var.external_aliases + s3_website_enabled = var.s3_website_enabled || local.preview_environment_enabled + s3_website_password_enabled = var.s3_website_password_enabled || local.preview_environment_enabled + s3_object_ownership = local.preview_environment_enabled ? "BucketOwnerEnforced" : var.s3_object_ownership + s3_failover_origin = local.failover_enabled ? [{ + domain_name = data.aws_s3_bucket.failover_bucket[0].bucket_domain_name + origin_id = data.aws_s3_bucket.failover_bucket[0].bucket + origin_path = null + s3_origin_config = { + origin_access_identity = null # will get translated to the origin_access_identity used by the origin created by this module. + } + }] : [] + s3_origins = local.enabled ? concat(local.s3_failover_origin, var.s3_origins) : [] + block_origin_public_access_enabled = var.block_origin_public_access_enabled && !local.preview_environment_enabled + + # SSL Requirements by s3 bucket configuration + # | s3 website enabled | preview enabled | SSL Enabled | + # |--------------------|-----------------|-------------| + # | false | false | true | + # | true | false | false | + # | true | true | false | + # Preview must have website_enabled. + origin_allow_ssl_requests_only = var.origin_allow_ssl_requests_only && !local.s3_website_enabled + + forward_header_values = local.preview_environment_enabled ? concat(var.forward_header_values, ["x-forwarded-host"]) : var.forward_header_values + cloudfront_default_ttl = local.preview_environment_enabled ? 0 : var.cloudfront_default_ttl + cloudfront_min_ttl = local.preview_environment_enabled ? 0 : var.cloudfront_min_ttl + cloudfront_max_ttl = local.preview_environment_enabled ? 
0 : var.cloudfront_max_ttl +} + +# Create an ACM and explicitly set it to us-east-1 (requirement of CloudFront) +module "acm_request_certificate" { + source = "cloudposse/acm-request-certificate/aws" + version = "0.18.0" + providers = { + aws = aws.us-east-1 + } + + domain_name = local.site_fqdn + subject_alternative_names = local.subject_alternative_names + zone_name = local.parent_zone_name + process_domain_validation_options = var.process_domain_validation_options + ttl = 300 + + context = module.this.context +} + +module "spa_web" { + source = "cloudposse/cloudfront-s3-cdn/aws" + version = "0.95.0" + + block_origin_public_access_enabled = local.block_origin_public_access_enabled + encryption_enabled = var.origin_encryption_enabled + origin_force_destroy = var.origin_force_destroy + versioning_enabled = var.origin_versioning_enabled + web_acl_id = local.aws_waf_enabled ? module.waf.outputs.acl.arn : null + + cloudfront_access_log_create_bucket = var.cloudfront_access_log_create_bucket + cloudfront_access_log_bucket_name = local.cloudfront_access_log_bucket_name + cloudfront_access_log_prefix = local.cloudfront_access_log_prefix + + index_document = var.cloudfront_index_document + default_root_object = var.cloudfront_default_root_object + + s3_access_logging_enabled = var.origin_s3_access_logging_enabled + s3_access_log_bucket_name = local.s3_access_log_bucket_name + s3_access_log_prefix = var.origin_s3_access_log_prefix + + comment = var.comment + aliases = local.aliases + external_aliases = local.external_aliases + parent_zone_name = local.parent_zone_name + dns_alias_enabled = true + website_enabled = local.s3_website_enabled + s3_website_password_enabled = local.s3_website_password_enabled + allow_ssl_requests_only = local.origin_allow_ssl_requests_only + acm_certificate_arn = module.acm_request_certificate.arn + ipv6_enabled = var.cloudfront_ipv6_enabled + + http_version = var.http_version + allowed_methods = var.cloudfront_allowed_methods + cached_methods = var.cloudfront_cached_methods + custom_error_response = var.cloudfront_custom_error_response + default_ttl = local.cloudfront_default_ttl + min_ttl = local.cloudfront_min_ttl + max_ttl = local.cloudfront_max_ttl + + ordered_cache = local.ordered_cache + forward_cookies = var.forward_cookies + forward_header_values = local.forward_header_values + + compress = var.cloudfront_compress + viewer_protocol_policy = var.cloudfront_viewer_protocol_policy + + deployment_principal_arns = { for arn in local.origin_deployment_principal_arns : arn => [""] } + # Actions the deployment ARNs are allowed to perform on the S3 Origin bucket + deployment_actions = var.origin_deployment_actions + + lambda_function_association = local.cloudfront_lambda_function_association + + custom_origins = var.custom_origins + origin_bucket = var.origin_bucket + origin_groups = local.failover_enabled ? [{ + primary_origin_id = null # will get translated to the origin id of the origin created by this module. + failover_origin_id = data.aws_s3_bucket.failover_bucket[0].bucket + failover_criteria = var.failover_criteria_status_codes + }] : [] + + s3_object_ownership = local.s3_object_ownership + s3_origins = local.s3_origins + + context = module.this.context +} + +resource "aws_shield_protection" "shield_protection" { + count = local.aws_shield_enabled ? 
1 : 0 + + name = module.spa_web.cf_id + resource_arn = module.spa_web.cf_arn +} diff --git a/modules/spa-s3-cloudfront/ordered_cache.tf b/modules/spa-s3-cloudfront/ordered_cache.tf new file mode 100644 index 000000000..0b68f3c2b --- /dev/null +++ b/modules/spa-s3-cloudfront/ordered_cache.tf @@ -0,0 +1,49 @@ +resource "aws_cloudfront_cache_policy" "created_cache_policies" { + for_each = { + for cache in var.ordered_cache : cache.cache_policy_name => cache if cache.cache_policy_id == null + } + + comment = var.comment + default_ttl = each.value.default_ttl + max_ttl = each.value.max_ttl + min_ttl = each.value.min_ttl + name = each.value.cache_policy_name + parameters_in_cache_key_and_forwarded_to_origin { + cookies_config { + cookie_behavior = "none" + } + headers_config { + header_behavior = "none" + } + query_strings_config { + query_string_behavior = "none" + } + } +} + +resource "aws_cloudfront_origin_request_policy" "created_origin_request_policies" { + for_each = { + for cache in var.ordered_cache : cache.origin_request_policy_name => cache if cache.origin_request_policy_id == null + } + + comment = var.comment + name = each.value.origin_request_policy_name + cookies_config { + cookie_behavior = "none" + } + headers_config { + header_behavior = "none" + } + query_strings_config { + query_string_behavior = "none" + } +} + +locals { + ordered_cache = [ + for cache in var.ordered_cache : merge(cache, { + cache_policy_id = cache.cache_policy_id == null ? aws_cloudfront_cache_policy.created_cache_policies[cache.cache_policy_name].id : cache.cache_policy_id + origin_request_policy_id = cache.origin_request_policy_id == null ? aws_cloudfront_origin_request_policy.created_origin_request_policies[cache.origin_request_policy_name].id : cache.origin_request_policy_id + }) + ] +} diff --git a/modules/spa-s3-cloudfront/outputs.tf b/modules/spa-s3-cloudfront/outputs.tf new file mode 100644 index 000000000..529bde7bb --- /dev/null +++ b/modules/spa-s3-cloudfront/outputs.tf @@ -0,0 +1,29 @@ +output "origin_s3_bucket_name" { + value = module.spa_web.s3_bucket + description = "Origin bucket name." +} + +output "origin_s3_bucket_arn" { + value = module.spa_web.s3_bucket_arn + description = "Origin bucket ARN." +} + +output "cloudfront_distribution_domain_name" { + value = module.spa_web.cf_domain_name + description = "Cloudfront Distribution Domain Name." +} + +output "cloudfront_distribution_alias" { + value = module.spa_web.aliases + description = "Cloudfront Distribution Alias Record." +} + +output "cloudfront_distribution_identity_arn" { + value = module.spa_web.cf_identity_iam_arn + description = "CloudFront Distribution Origin Access Identity IAM ARN." +} + +output "failover_s3_bucket_name" { + value = try(data.aws_s3_bucket.failover_bucket[0].bucket, null) + description = "Failover Origin bucket name, if enabled." +} diff --git a/modules/spa-s3-cloudfront/provider-other-regions.tf b/modules/spa-s3-cloudfront/provider-other-regions.tf new file mode 100644 index 000000000..62441d58d --- /dev/null +++ b/modules/spa-s3-cloudfront/provider-other-regions.tf @@ -0,0 +1,34 @@ +provider "aws" { + region = local.failover_region # if var.failover_s3_region is not set, this will fall back on var.region + + alias = "failover" + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. 
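+  # This aliased provider is only consumed by data.aws_s3_bucket.failover_bucket in failover.tf;
+  # when var.failover_s3_origin_environment is unset, local.failover_region falls back to var.region.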
+ profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +# For cloudfront, the acm has to be created in us-east-1 or it will not work +provider "aws" { + region = "us-east-1" + + alias = "us-east-1" + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} diff --git a/modules/spa-s3-cloudfront/providers.tf b/modules/spa-s3-cloudfront/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/spa-s3-cloudfront/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/spa-s3-cloudfront/remote-state.tf b/modules/spa-s3-cloudfront/remote-state.tf new file mode 100644 index 000000000..8d8e44418 --- /dev/null +++ b/modules/spa-s3-cloudfront/remote-state.tf @@ -0,0 +1,40 @@ +module "waf" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + bypass = !local.aws_waf_enabled + component = var.cloudfront_aws_waf_component_name + privileged = false + environment = var.cloudfront_aws_waf_environment + + defaults = { + acl = { + arn = "" + } + } + + context = module.this.context +} + +module "dns_delegated" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "dns-delegated" + environment = var.dns_delegated_environment_name + + context = module.this.context +} + +module "github_runners" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + bypass = !local.github_runners_enabled + component = var.github_runners_component_name + stage = var.github_runners_stage_name + environment = var.github_runners_environment_name + tenant = try(var.github_runners_tenant_name, module.this.tenant) + + context = module.this.context +} diff --git a/modules/spa-s3-cloudfront/variables.tf b/modules/spa-s3-cloudfront/variables.tf new file mode 100644 index 000000000..cfa689846 --- /dev/null +++ b/modules/spa-s3-cloudfront/variables.tf @@ -0,0 +1,590 @@ +variable "region" { + type = string + description = "AWS Region." +} + +variable "parent_zone_name" { + type = string + default = "" + description = "Parent domain name of site to publish. Defaults to format(parent_zone_name_pattern, stage, environment)." 
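+  # When left empty, main.tf falls back to the `default_domain_name` output of the `dns-delegated`
+  # component, looked up via `module.dns_delegated` in remote-state.tf.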
+} + +variable "process_domain_validation_options" { + type = bool + default = true + description = "Flag to enable/disable processing of the record to add to the DNS zone to complete certificate validation" +} + +variable "site_fqdn" { + type = string + default = "" + description = "Fully qualified domain name of site to publish. Overrides site_subdomain and parent_zone_name." +} + +variable "site_subdomain" { + type = string + default = "" + description = "Subdomain to plug into site_name_pattern to make site FQDN." +} + +variable "external_aliases" { + type = list(string) + default = [] + description = <<-EOT + List of FQDN's - Used to set the Alternate Domain Names (CNAMEs) setting on CloudFront. No new Route53 records will be created for these. + + Setting `process_domain_validation_options` to true may cause the component to fail if an external_alias DNS zone is not controlled by Terraform. + + Setting `preview_environment_enabled` to `true` will cause this variable to be ignored. + EOT +} + +variable "s3_website_enabled" { + type = bool + default = false + description = <<-EOT + Set to true to enable the created S3 bucket to serve as a website independently of CloudFront, + and to use that website as the origin. + + Setting `preview_environment_enabled` will implicitly set this to `true`. + EOT +} + +variable "s3_website_password_enabled" { + type = bool + default = false + description = <<-EOT + If set to true, and `s3_website_enabled` is also true, a password will be required in the `Referrer` field of the + HTTP request in order to access the website, and CloudFront will be configured to pass this password in its requests. + This will make it much harder for people to bypass CloudFront and access the S3 website directly via its website endpoint. + EOT +} + +variable "s3_object_ownership" { + type = string + default = "ObjectWriter" + description = "Specifies the S3 object ownership control on the origin bucket. Valid values are `ObjectWriter`, `BucketOwnerPreferred`, and 'BucketOwnerEnforced'." +} + +variable "s3_origins" { + type = list(object({ + domain_name = string + origin_id = string + origin_path = string + s3_origin_config = object({ + origin_access_identity = string + }) + })) + default = [] + description = <<-EOT + A list of S3 [origins](https://www.terraform.io/docs/providers/aws/r/cloudfront_distribution.html#origin-arguments) (in addition to the one created by this component) for this distribution. + S3 buckets configured as websites are `custom_origins`, not `s3_origins`. + Specifying `s3_origin_config.origin_access_identity` as `null` or `""` will have it translated to the `origin_access_identity` used by the origin created by this component. + EOT +} + +variable "origin_bucket" { + type = string + default = null + description = "Name of an existing S3 bucket to use as the origin. If this is not provided, this component will create a new s3 bucket using `var.name` and other context related inputs" +} + +variable "origin_s3_access_logging_enabled" { + type = bool + default = null + description = <<-EOF + Set `true` to deliver S3 Access Logs to the `origin_s3_access_log_bucket_name` bucket. + Defaults to `false` if `origin_s3_access_log_bucket_name` is empty (the default), `true` otherwise. + Must be set explicitly if the access log bucket is being created at the same time as this module is being invoked. 
+ EOF +} + +variable "origin_s3_access_log_bucket_name" { + type = string + default = "" + description = "Name of the existing S3 bucket where S3 Access Logs for the origin Bucket will be delivered. Default is not to enable S3 Access Logging for the origin Bucket." +} + +variable "origin_s3_access_log_bucket_name_rendering_enabled" { + type = bool + description = <<-EOT + If set to `true`, then the S3 origin access logs bucket name will be rendered by calling `format("%v-%v-%v-%v", var.namespace, var.environment, var.stage, var.origin_s3_access_log_bucket_name)`. + Otherwise, the value for `origin_s3_access_log_bucket_name` will need to be the globally unique name of the access logs bucket. + + For example, if this component produces an origin bucket named `eg-ue1-devplatform-example` and `origin_s3_access_log_bucket_name` is set to + `example-s3-access-logs`, then the bucket name will be rendered to be `eg-ue1-devplatform-example-s3-access-logs`. + EOT + default = false +} + +variable "origin_s3_access_log_prefix" { + type = string + default = "" + description = "Prefix to use for S3 Access Log object keys. Defaults to `logs/$${module.this.id}`" +} + +variable "origin_force_destroy" { + type = bool + default = false + description = "A boolean string that indicates all objects should be deleted from the origin Bucket so that the Bucket can be destroyed without error. These objects are not recoverable." +} + +variable "origin_versioning_enabled" { + type = bool + default = false + description = "Enable or disable versioning for the origin Bucket. Versioning is a means of keeping multiple variants of an object in the same bucket." +} + +variable "origin_deployment_principal_arns" { + type = list(string) + description = "List of role ARNs to grant deployment permissions to the origin Bucket." + default = [] +} + +variable "origin_deployment_actions" { + type = list(string) + default = [ + "s3:PutObject", + "s3:PutObjectAcl", + "s3:GetObject", + "s3:DeleteObject", + "s3:ListBucket", + "s3:ListBucketMultipartUploads", + "s3:GetBucketLocation", + "s3:AbortMultipartUpload" + ] + description = "List of actions to permit `origin_deployment_principal_arns` to perform on bucket and bucket prefixes (see `origin_deployment_principal_arns`)" +} + +variable "origin_allow_ssl_requests_only" { + type = bool + default = true + description = "Set to `true` in order to have the origin bucket require requests to use Secure Socket Layer (HTTPS/SSL). This will explicitly deny access to HTTP requests" +} + +variable "block_origin_public_access_enabled" { + type = bool + default = true + description = "When set to 'true' the s3 origin bucket will have public access block enabled." +} + +variable "origin_encryption_enabled" { + type = bool + default = true + description = "When set to 'true' the origin Bucket will have aes256 encryption enabled by default." +} + +variable "cloudfront_allowed_methods" { + type = list(string) + default = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] + description = "List of allowed methods (e.g. GET, PUT, POST, DELETE, HEAD) for AWS CloudFront." +} + +variable "cloudfront_cached_methods" { + type = list(string) + default = ["GET", "HEAD"] + description = "List of cached methods (e.g. GET, PUT, POST, DELETE, HEAD)." +} + +variable "cloudfront_compress" { + type = bool + default = false + description = "Compress content for web requests that include Accept-Encoding: gzip in the request header." 
+} + +variable "cloudfront_custom_error_response" { + # http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html#custom-error-pages-procedure + # https://www.terraform.io/docs/providers/aws/r/cloudfront_distribution.html#custom-error-response-arguments + type = list(object({ + error_caching_min_ttl = optional(string, "10") + error_code = string + response_code = string + response_page_path = string + })) + + description = "List of one or more custom error response element maps." + default = [] +} + +variable "cloudfront_access_log_create_bucket" { + type = bool + default = true + description = <<-EOT + When `true` and `cloudfront_access_logging_enabled` is also true, this module will create a new, + separate S3 bucket to receive CloudFront Access Logs. + EOT +} + +variable "cloudfront_access_log_bucket_name" { + type = string + default = "" + description = <<-EOT + When `cloudfront_access_log_create_bucket` is `false`, this is the name of the existing S3 Bucket where + CloudFront Access Logs are to be delivered and is required. IGNORED when `cloudfront_access_log_create_bucket` is `true`. + EOT +} + +variable "cloudfront_access_log_bucket_name_rendering_enabled" { + type = bool + description = <<-EOT + If set to `true`, then the CloudFront origin access logs bucket name will be rendered by calling `format("%v-%v-%v-%v", var.namespace, var.environment, var.stage, var.cloudfront_access_log_bucket_name)`. + Otherwise, the value for `cloudfront_access_log_bucket_name` will need to be the globally unique name of the access logs bucket. + + For example, if this component produces an origin bucket named `eg-ue1-devplatform-example` and `cloudfront_access_log_bucket_name` is set to + `example-cloudfront-access-logs`, then the bucket name will be rendered to be `eg-ue1-devplatform-example-cloudfront-access-logs`. + EOT + default = false +} + +variable "cloudfront_access_log_prefix" { + type = string + default = "" + description = "Prefix to use for CloudFront Access Log object keys. Defaults to no prefix." +} + +variable "cloudfront_access_log_prefix_rendering_enabled" { + type = bool + default = false + description = "Whether or not to dynamically render $${module.this.id} at the end of `var.cloudfront_access_log_prefix`." +} + +variable "cloudfront_aws_shield_protection_enabled" { + description = "Enable or disable AWS Shield Advanced protection for the CloudFront distribution. If set to 'true', a subscription to AWS Shield Advanced must exist in this account." + type = bool + default = false +} + +variable "cloudfront_aws_waf_protection_enabled" { + description = <<-EOT + Enable or disable AWS WAF for the CloudFront distribution. + + This assumes that the `aws-waf-acl-default-cloudfront` component has been deployed to the regional stack corresponding + to `var.waf_acl_environment`. + EOT + type = bool + default = true +} + +variable "cloudfront_aws_waf_environment" { + type = string + description = "The environment where the WAF ACL for CloudFront distribution exists." + default = null +} + +variable "cloudfront_aws_waf_component_name" { + type = string + description = "The name of the component used when deploying WAF ACL" + default = "waf" +} + +variable "cloudfront_default_root_object" { + type = string + default = "index.html" + description = "Object that CloudFront return when requests the root URL." 
+} + +variable "cloudfront_default_ttl" { + type = number + default = 60 + description = "Default amount of time (in seconds) that an object is in a CloudFront cache." +} + +variable "cloudfront_min_ttl" { + type = number + default = 0 + description = "Minimum amount of time that you want objects to stay in CloudFront caches." +} + +variable "cloudfront_max_ttl" { + type = number + default = 31536000 + description = "Maximum amount of time (in seconds) that an object is in a CloudFront cache." +} + +variable "cloudfront_index_document" { + type = string + default = "index.html" + description = "Amazon S3 returns this index document when requests are made to the root domain or any of the subfolders." +} + +variable "cloudfront_ipv6_enabled" { + type = bool + default = true + description = "Set to true to enable an AAAA DNS record to be set as well as the A record." +} + +variable "cloudfront_viewer_protocol_policy" { + type = string + description = "Limit the protocol users can use to access content. One of `allow-all`, `https-only`, or `redirect-to-https`." + default = "redirect-to-https" +} + +variable "cloudfront_lambda_function_association" { + type = list(object({ + event_type = string + include_body = bool + lambda_arn = string + })) + + description = "A config block that configures the CloudFront distribution with lambda@edge functions for specific events." + default = [] +} + +variable "custom_origins" { + type = list(object({ + domain_name = string + origin_id = string + origin_path = string + custom_headers = list(object({ + name = string + value = string + })) + custom_origin_config = object({ + http_port = number + https_port = number + origin_protocol_policy = string + origin_ssl_protocols = list(string) + origin_keepalive_timeout = number + origin_read_timeout = number + }) + })) + default = [] + description = <<-EOT + A list of additional custom website [origins](https://www.terraform.io/docs/providers/aws/r/cloudfront_distribution.html#origin-arguments) for this distribution. + EOT +} + +variable "dns_delegated_environment_name" { + description = "The environment where `dns-delegated` component is deployed to" + type = string + default = "gbl" +} + +variable "failover_criteria_status_codes" { + type = list(string) + description = "List of HTTP Status Codes to use as the origin group failover criteria." + default = [ + 403, + 404, + 500, + 502 + ] +} + +variable "failover_s3_origin_format" { + type = string + description = <<-EOT + If `var.failover_s3_origin_environment` is supplied, this is the format to use for the failover S3 origin bucket name when + building the name via `format([format], var.namespace, var.failover_s3_origin_environment, var.stage, var.name)` + and then looking it up via the `aws_s3_bucket` Data Source. + + For example, if this component creates an origin of name `eg-ue1-devplatform-example` and `var.failover_s3_origin_environment` + is set to `uw1`, then it is expected that a bucket with the name `eg-uw1-devplatform-example-failover` exists in `us-west-1`. + EOT + default = "%v-%v-%v-%v-failover" +} + +variable "failover_s3_origin_environment" { + type = string + description = <<-EOT + The [fixed name](https://github.com/cloudposse/terraform-aws-utils/blob/399951e552483a4f4c1dc7fbe2675c443f3dbd83/main.tf#L10) of the AWS Region where the + failover S3 origin exists. Setting this variable will enable use of a failover S3 origin, but it is required for the + failover S3 origin to exist beforehand. 
This variable is used in conjunction with `var.failover_s3_origin_format` to + build out the name of the Failover S3 origin in the specified region. + + For example, if this component creates an origin of name `eg-ue1-devplatform-example` and this variable is set to `uw1`, + then it is expected that a bucket with the name `eg-uw1-devplatform-example-failover` exists in `us-west-1`. + EOT + default = null +} + +variable "forward_cookies" { + type = string + default = "none" + description = "Specifies whether you want CloudFront to forward all or no cookies to the origin. Can be 'all' or 'none'" +} + +variable "forward_header_values" { + type = list(string) + description = "A list of whitelisted header values to forward to the origin (incompatible with `cache_policy_id`)" + default = ["Access-Control-Request-Headers", "Access-Control-Request-Method", "Origin"] +} + +variable "ordered_cache" { + type = list(object({ + target_origin_id = string + path_pattern = string + + allowed_methods = list(string) + cached_methods = list(string) + compress = bool + trusted_signers = list(string) + trusted_key_groups = list(string) + + cache_policy_name = optional(string) + cache_policy_id = optional(string) + origin_request_policy_name = optional(string) + origin_request_policy_id = optional(string) + + viewer_protocol_policy = string + min_ttl = number + default_ttl = number + max_ttl = number + response_headers_policy_id = string + + forward_query_string = bool + forward_header_values = list(string) + forward_cookies = string + forward_cookies_whitelisted_names = list(string) + + lambda_function_association = list(object({ + event_type = string + include_body = bool + lambda_arn = string + })) + + function_association = list(object({ + event_type = string + function_arn = string + })) + })) + default = [] + description = <<-EOT + An ordered list of [cache behaviors](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudfront_distribution#cache-behavior-arguments) resource for this distribution. + List in order of precedence (first match wins). This is in addition to the default cache policy. + Set `target_origin_id` to `""` to specify the S3 bucket origin created by this module. + Set `cache_policy_id` to `""` to use `cache_policy_name` for creating a new policy. At least one of the two must be set. + Set `origin_request_policy_id` to `""` to use `origin_request_policy_name` for creating a new policy. At least one of the two must be set. + EOT +} + +variable "preview_environment_enabled" { + type = bool + description = <<-EOT + Enable or disable SPA Preview Environments via Lambda@Edge, i.e. mapping `subdomain.example.com` to the `/subdomain` + path in the origin S3 bucket. 
+ + This variable implicitly affects the following variables: + + * `s3_website_enabled` + * `s3_website_password_enabled` + * `block_origin_public_access_enabled` + * `origin_allow_ssl_requests_only` + * `forward_header_values` + * `cloudfront_default_ttl` + * `cloudfront_min_ttl` + * `cloudfront_max_ttl` + * `cloudfront_lambda_function_association` + EOT + default = false +} + +variable "github_runners_deployment_principal_arn_enabled" { + type = bool + description = "A flag that is used to decide whether or not to include the GitHub Runner's IAM role in origin_deployment_principal_arns list" + default = true +} + +variable "github_runners_component_name" { + type = string + description = "The name of the component that deploys GitHub Runners, used in remote-state lookup" + default = "github-runners" +} + +variable "github_runners_environment_name" { + type = string + description = "The name of the environment where the CloudTrail bucket is provisioned" + default = "ue2" +} + +variable "github_runners_stage_name" { + type = string + description = "The stage name where the CloudTrail bucket is provisioned" + default = "auto" +} + +variable "github_runners_tenant_name" { + type = string + description = "The tenant name where the GitHub Runners are provisioned" + default = null +} + +variable "lambda_edge_functions" { + type = map(object({ + source = optional(list(object({ + filename = string + content = string + }))) + source_dir = optional(string) + source_zip = optional(string) + runtime = string + handler = string + event_type = string + include_body = bool + })) + description = <<-EOT + Lambda@Edge functions to create. + + The key of this map is the name of the Lambda@Edge function. + + This map will be deep merged with each enabled default function. Use deep merge to change or overwrite specific values passed by those function objects. + EOT + default = {} +} + +variable "lambda_edge_runtime" { + type = string + description = <<-EOT + The default Lambda@Edge runtime for all functions. + + This value is deep merged in `module.lambda_edge_functions` with `var.lambda_edge_functions` and can be overwritten for any individual function. + EOT + default = "nodejs16.x" +} + +variable "lambda_edge_handler" { + type = string + description = <<-EOT + The default Lambda@Edge handler for all functions. + + This value is deep merged in `module.lambda_edge_functions` with `var.lambda_edge_functions` and can be overwritten for any individual function. + EOT + default = "index.handler" +} + +variable "lambda_edge_allowed_ssm_parameters" { + type = list(string) + description = "The Lambda@Edge functions will be allowed to access the list of AWS SSM parameter with these ARNs" + default = [] +} + +variable "lambda_edge_destruction_delay" { + type = string + description = <<-EOT + The delay, in [Golang ParseDuration](https://pkg.go.dev/time#ParseDuration) format, to wait before destroying the Lambda@Edge + functions. + + This delay is meant to circumvent Lambda@Edge functions not being immediately deletable following their dissociation from + a CloudFront distribution, since they are replicated to CloudFront Edge servers around the world. + + If set to `null`, no delay will be introduced. + + By default, the delay is 20 minutes. This is because it takes about 3 minutes to destroy a CloudFront distribution, and + around 15 minutes until the Lambda@Edge function is available for deletion, in most cases. + + For more information, see: https://github.com/hashicorp/terraform-provider-aws/issues/1721. 
+ EOT + default = "20m" +} + +variable "http_version" { + type = string + default = "http2" + description = "The maximum HTTP version to support on the distribution. Allowed values are http1.1, http2, http2and3 and http3" +} + +variable "comment" { + type = string + description = "Any comments you want to include about the distribution." + default = "Managed by Terraform" +} diff --git a/modules/spa-s3-cloudfront/versions.tf b/modules/spa-s3-cloudfront/versions.tf new file mode 100644 index 000000000..cc73ffd35 --- /dev/null +++ b/modules/spa-s3-cloudfront/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.9.0" + } + } +} diff --git a/modules/spacelift-worker-pool/default.auto.tfvars b/modules/spacelift-worker-pool/default.auto.tfvars deleted file mode 100644 index f711bf5f0..000000000 --- a/modules/spacelift-worker-pool/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -enabled = false - -github_netrc_enabled = true diff --git a/modules/spacelift/README.md b/modules/spacelift/README.md index 85501df58..0eee992d2 100644 --- a/modules/spacelift/README.md +++ b/modules/spacelift/README.md @@ -1,374 +1,413 @@ +--- +tags: + - layer/spacelift + - provider/aws + - provider/spacelift +--- + # Component: `spacelift` -This component is responsible for provisioning Spacelift stacks. +These components are responsible for setting up Spacelift and include three components: `spacelift/admin-stack`, +`spacelift/spaces`, and `spacelift/worker-pool`. Spacelift is a specialized, Terraform-compatible continuous integration and deployment (CI/CD) platform for -infrastructure-as-code. It's designed and implemented by long-time DevOps practitioners based on previous experience with -large-scale installations - dozens of teams, hundreds of engineers and tens of thousands of cloud resources. +infrastructure-as-code. It's designed and implemented by long-time DevOps practitioners based on previous experience +with large-scale installations - dozens of teams, hundreds of engineers and tens of thousands of cloud resources. + +## Stack Configuration + +Spacelift exists outside of the AWS ecosystem, so we define these components as unique to our standard stack +organization. Spacelift Spaces are required before tenant-specific stacks are created in Spacelift, and the root +administrator stack, referred to as `root-gbl-spacelift-admin-stack`, also does not belong to a specific tenant. +Therefore, we define both outside of the standard `core` or `plat` stacks directories. That root administrator stack is +responsible for creating the tenant-specific administrator stacks, `core-gbl-spacelift-admin-stack` and +`plat-gbl-spacelift-admin-stack`. + +Our solution is to define a spacelift-specific configuration file per Spacelift Space. Typically our Spaces would be +`root`, `core`, and `plat`, so we add three files: + +```diff ++ stacks/orgs/NAMESPACE/spacelift.yaml ++ stacks/orgs/NAMESPACE/core/spacelift.yaml ++ stacks/orgs/NAMESPACE/plat/spacelift.yaml +``` + +### Global Configuration + +In order to apply common Spacelift configuration to all stacks, we need to set a few global Spacelift settings. The +`pr-comment-triggered` label will be required to trigger stacks with GitHub comments but is not required otherwise. More +on triggering Spacelift stacks to follow. 
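+
+The global settings shown below are inherited by every component instance. Conversely, a component that should not be
+managed by Spacelift at all can override the same `settings.spacelift` block to opt out. A minimal sketch, using a
+hypothetical `example-component`:
+
+```yaml
+components:
+  terraform:
+    example-component:
+      settings:
+        spacelift:
+          # Do not create a Spacelift stack for this component
+          workspace_enabled: false
+```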
+ +Add the following to `stacks/orgs/NAMESPACE/_defaults.yaml`: + +```yaml +settings: + spacelift: + workspace_enabled: true # enable spacelift by default + before_apply: + - spacelift-configure-paths + before_init: + - spacelift-configure-paths + - spacelift-write-vars + - spacelift-tf-workspace + before_plan: + - spacelift-configure-paths + labels: + - pr-comment-triggered +``` + +Furthermore, specify additional tenant-specific Space configuration for both `core` and `plat` tenants. + +For example, for `core` add the following to `stacks/orgs/NAMESPACE/core/_defaults.yaml`: + +```yaml +terraform: + settings: + spacelift: + space_name: core +``` + +And for `plat` add the following to `stacks/orgs/NAMESPACE/plat/_defaults.yaml`: -## Usage +```yaml +terraform: + settings: + spacelift: + space_name: plat +``` -**Stack Level**: Regional +### Spacelift `root` Space -This component provisions an administrative Spacelift stack and assigns it to a worker pool. Although -the stack can manage stacks in any region, it should be provisioned in the same region as the worker pool. +The `root` Space in Spacelift is responsible for deploying the root administrator stack, `admin-stack`, and the Spaces +component, `spaces`. This Spaces component also includes Spacelift policies. Since the root administrator stack is unique +to tenants, we modify the stack context to create a unique stack slug, `root-gbl-spacelift`. + +`stacks/orgs/NAMESPACE/spacelift.yaml`: ```yaml +import: + - mixins/region/global-region + - orgs/NAMESPACE/_defaults + - catalog/terraform/spacelift/admin-stack + - catalog/terraform/spacelift/spaces + +# These intentionally overwrite the default values +vars: + tenant: root + environment: gbl + stage: spacelift + components: terraform: - spacelift-defaults: + # This admin stack creates other "admin" stacks + admin-stack: metadata: - type: abstract - component: spacelift + component: spacelift/admin-stack + inherits: + - admin-stack/default settings: spacelift: - workspace_enabled: true - administrative: true - autodeploy: true - before_init: [] - component_root: components/terraform/spacelift - stack_destructor_enabled: false - policies_enabled: [] - policies_by_id_enabled: - - trigger-administrative-policy - vars: - # This is to locally apply the stack - external_execution: true - # This should match the version set in the Dockerfile - terraform_version: "1.2.3" - terraform_version_map: - "1": "1.2.3" - # additional defaults - infracost_enabled: false - git_repository: infrastructure - git_branch: main - runner_image: .dkr.ecr..amazonaws.com/:latest - administrative_trigger_policy_enabled: false - worker_pool_name_id_map: {} - autodeploy: true - stack_config_path_template: stacks/%s.yaml - spacelift_component_path: components/terraform - administrative_stack_drift_detection_enabled: true - administrative_stack_drift_detection_reconcile: true - administrative_stack_drift_detection_schedule: - - 0 4 * * * - drift_detection_enabled: true - drift_detection_reconcile: true - drift_detection_schedule: - - 0 4 * * * - aws_role_enabled: false - aws_role_generate_credentials_in_worker: false - stack_destructor_enabled: true - before_init: [] - # Add these existing policies by ID, do not create them, they are already provisioned in Spacelift - policies_by_id_enabled: - - git_push-proposed-run-policy - - git_push-auto-cancel-policy - - plan-default-policy - - trigger-dependencies-policy - policies_available: [] - policies_enabled: [] - policies_by_name_enabled: [] - - # Manages policies, admin 
stacks, and core OU accounts - spacelift: - metadata: - component: spacelift - inherits: - - spacelift-defaults + root_administrative: true + labels: + - root-admin + - admin vars: enabled: true - # Use context_filters to split up admin stack management - # context_filters: - # stages: - # - artifacts - # - audit - # - auto - # - corp - # - dns - # - identity - # - marketplace - # - network - # - public - # - security - # These are the policies created from https://github.com/cloudposse/terraform-spacelift-cloud-infrastructure-automation/tree/master/catalog/policies - # Make sure to remove the .rego suffix - policies_available: - - git_push.proposed-run - - git_push.tracked-run - - plan.default - - trigger.dependencies - - trigger.retries - # This is the global admin policy - - trigger.administrative - # These are the policies added to each spacelift stack created by this admin stack - policies_enabled: - - git_push.proposed-run - - git_push.tracked-run - - plan.default - - trigger.dependencies + root_admin_stack: true # This stack will be created in the root space and will create all the other admin stacks as children. + context_filters: # context_filters determine which child stacks to manage with this admin stack + administrative: true # This stack is managing all the other admin stacks + root_administrative: false # We don't want this stack to also find itself in the config and add itself a second time + labels: + - admin + # attachments only on the root stack + root_stack_policy_attachments: + - TRIGGER Global administrator + # this creates policies for the children (admin) stacks + child_policy_attachments: + - TRIGGER Global administrator ``` -## Prerequisites +#### Deployment -### GitHub Integration +> [!TIP] +> +> The following steps assume that you've already authenticated with Spacelift locally. -1. The GitHub owner will need to sign up for a [free trial of Spacelift](https://spacelift.io/free-trial.html) -1. Once an account is created take note of the URL - usually its `https://.app.spacelift.io/` -1. Create a Login Policy +First deploy Spaces and policies with the `spaces` component: - - Click on Policies then Add Policy - - Use the following policy and replace `GITHUBORG` with the GitHub Organization slug and DEV with the GitHub id for the Dev setting up the Spacelift module. +```bash +atmos terraform apply spaces -s root-gbl-spacelift +``` - ```rego - package spacelift +In the Spacelift UI, you should see each Space and each policy. - # See https://docs.spacelift.io/concepts/policy/login-policy for implementation details. - # Note: Login policies don't affect GitHub organization or SSO admins. - # Note 2: Enabling SSO requires that all users have an IdP (G Suite) account, so we'll just use - # GitHub authentication in the meantime while working with external collaborators. - # Map session input data to human friendly variables to use in policy evaluation +Next, deploy the `root` `admin-stack` with the following: - username := input.session.login - member_of := input.session.teams # Input is friendly name, e.g. "SRE" not "sre" or "@GITHUBORG/sre" - GITHUBORG := input.session.member # Is this user a member of the CUSTOMER GitHub org? +```bash +atmos terraform apply admin-stack -s root-gbl-spacelift +``` - # Define GitHub usernames of non org external collaborators with admin vs. 
user access - admin_collaborators := { "DEV" } - user_collaborators := { "GITHUBORG" } # Using GITHUBORG as a placeholder to avoid empty set +Now in the Spacelift UI, you should see the administrator stacks created. Typically these should look similar to the +following: - # Grant admin access to GITHUBORG org members in the CloudPosse group - admin { - GITHUBORG - member_of[_] == "CloudPosse" - } +```diff ++ root-gbl-spacelift-admin-stack ++ root-gbl-spacelift-spaces ++ core-gbl-spacelift-admin-stack ++ plat-gbl-spacelift-admin-stack ++ core-ue1-auto-spacelift-worker-pool +``` - # Grant admin access to non-GITHUBORG org accounts in the admin_collaborators set - admin { - # not GITHUBORG - admin_collaborators[username] - } +> [!TIP] +> +> The `spacelift/worker-pool` component is deployed to a specific tenant, stage, and region but is still deployed by the +> root administrator stack. Verify the administrator stack by checking the `managed-by:` label. - # Grant user access to GITHUBORG org members in the Developers group - # allow { - # GITHUBORG - # member_of[_] == "Developers" - # } +Finally, deploy the Spacelift Worker Pool (change the stack-slug to match your configuration): - # Grant user access to non-GITHUBORG org accounts in the user_collaborators set - allow { - not GITHUBORG - user_collaborators[username] - } +```bash +atmos terraform apply spacelift/worker-pool -s core-ue1-auto +``` - # Deny access to any non-GITHUBORG org accounts who aren't defined in external collaborators sets - deny { - not GITHUBORG - not user_collaborators[username] - not admin_collaborators[username] - } - ``` +### Spacelift Tenant-Specific Spaces -## Spacelift Layout +A tenant-specific Space in Spacelift, such as `core` or `plat`, includes the administrator stack for that specific Space +and _all_ components in the given tenant. This administrator stack uses `var.context_filters` to select all components +in the given tenant and create Spacelift stacks for each. Similar to the root administrator stack, we again create a +unique stack slug for each tenant. For example `core-gbl-spacelift` or `plat-gbl-spacelift`. -[Runtime configuration](https://docs.spacelift.io/concepts/configuration/runtime-configuration) is a piece of setup -that is applied to individual runs instead of being global to the stack. -It's defined in `.spacelift/config.yml` YAML file at the root of your repository. -It is required for Spacelift to work with `atmos`. +For example, configure a `core` administrator stack with `stacks/orgs/NAMESPACE/core/spacelift.yaml`. -### Create Spacelift helper scripts +```yaml +import: + - mixins/region/global-region + - orgs/NAMESPACE/core/_defaults + - catalog/terraform/spacelift/admin-stack -[/rootfs/usr/local/bin/spacelift-tf-workspace](/rootfs/usr/local/bin/spacelift-tf-workspace) manages selecting or creating a Terraform workspace; similar to how `atmos` manages workspaces -during a Terraform run. +vars: + tenant: core + environment: gbl + stage: spacelift -[/rootfs/usr/local/bin/spacelift-write-vars](/rootfs/usr/local/bin/spacelift-write-vars) writes the component config using `atmos` to the `spacelift.auto.tfvars.json` file. 
+components: + terraform: + admin-stack: + metadata: + component: spacelift/admin-stack + inherits: + - admin-stack/default + settings: + spacelift: + labels: # Additional labels for this stack + - admin-stack-name:core + vars: + enabled: true + context_filters: + tenants: ["core"] + labels: # Additional labels added to all children + - admin-stack-name:core # will be used to automatically create the `managed-by:stack-name` label + child_policy_attachments: + - TRIGGER Dependencies +``` -**NOTE**: make sure they are all executable: +Deploy the `core` `admin-stack` with the following: ```bash -chmod +x rootfs/usr/local/bin/spacelift* +atmos terraform apply admin-stack -s core-gbl-spacelift ``` -## Bootstrapping - -After creating & linking Spacelift to this repo (see the -[docs](https://docs.spacelift.io/integrations/github)), follow these steps... +Create the same for the `plat` tenant in `stacks/orgs/NAMESPACE/plat/spacelift.yaml`, update the tenant and +configuration as necessary, and deploy with the following: -### Deploy the [`spacelift-worker-pool`](../spacelift-worker-pool) Component +```bash +atmos terraform apply admin-stack -s plat-gbl-spacelift +``` -See [`spacelift-worker-pool` README](../spacelift-worker-pool/README.md) for the configuration and deployment needs. +Now all stacks for all components should be created in the Spacelift UI. -### Update the `spacelift` catalog +## Triggering Spacelift Runs -1. `git_repository` = Name of `infrastructure` repository -1. `git_branch` = Name of main/master branch -1. `worker_pool_name_id_map` = Map of arbitrary names to IDs Spacelift worker pools, -taken from the `worker_pool_id` output of the `spacelift-worker-pool` component. -1. Set `components.terraform.spacelift.settings.spacelift.worker_pool_name` -to the name of the worker pool you want to use for the `spacelift` component, -the name being the key you set in the `worker_pool_name_id_map` map. +Cloud Posse recommends two options to trigger Spacelift stacks. +### Triggering with Policy Attachments -### Deploy the admin stacks +Historically, all stacks were triggered with three `GIT_PUSH` policies: -Set these ENV vars: +1. [GIT_PUSH Global Administrator](https://github.com/cloudposse/terraform-spacelift-cloud-infrastructure-automation/blob/main/catalog/policies/git_push.administrative.rego) + triggers admin stacks +2. [GIT_PUSH Proposed Run](https://github.com/cloudposse/terraform-spacelift-cloud-infrastructure-automation/blob/main/catalog/policies/git_push.proposed-run.rego) + triggers Proposed runs (typically Terraform Plan) for all non-admin stacks on Pull Requests +3. [GIT_PUSH Tracked Run](https://github.com/cloudposse/terraform-spacelift-cloud-infrastructure-automation/blob/main/catalog/policies/git_push.tracked-run.rego) + triggers Tracked runs (typically Terraform Apply) for all non-admin stacks on merges into `main` -```bash -export SPACELIFT_API_KEY_ENDPOINT=https://.app.spacelift.io -export SPACELIFT_API_KEY_ID=... -export SPACELIFT_API_KEY_SECRET=... -``` +Attach these policies to stacks and Spacelift will trigger them on the respective git push. -The name of the spacelift stack resource will be different depending on the name of the component and the root atmos stack. -This would be the command if the root atmos stack is `core-gbl-auto` and the spacelift component is `spacelift`. 
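+
+For example, one way to use the policy-attachment approach is to attach the run policies to every child stack managed
+by a tenant admin stack via `child_policy_attachments`. This is only a sketch; the policy names here are assumptions
+and must match the names under which the policies were actually created (for example, by the `spaces` component):
+
+```yaml
+components:
+  terraform:
+    admin-stack:
+      vars:
+        child_policy_attachments:
+          - GIT_PUSH Proposed Run
+          - GIT_PUSH Tracked Run
+```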
+### Triggering with GitHub Comments (Preferred) -``` -atmos terraform apply spacelift --stack core-gbl-auto -target 'module.spacelift.module.stacks["core-gbl-auto-spacelift"]' -``` +Atmos support for `atmos describe affected` made it possible to greatly improve Spacelift's triggering workflow. Now we +can add a GitHub Action to collect all affected components for a given Pull Request and add a GitHub comment to the +given PR with a formatted list of the affected stacks. Then Spacelift can watch for a GitHub comment event and then +trigger stacks based on that comment. -Note that this is the only manually operation you need to perform in `geodesic` using `atmos` to create the initial admin stack. -All other infrastructure stacks wil be created in Spacelift by this admin stack. +In order to set up GitHub Comment triggers, first add the following `GIT_PUSH Plan Affected` policy to the `spaces` +component. +For example, `stacks/catalog/spacelift/spaces.yaml` -## Pull Request Workflow +```yaml +components: + terraform: + spaces: + metadata: + component: spacelift/spaces + settings: + spacelift: + administrative: true + space_name: root + vars: + spaces: + root: + policies: +--- +# This policy will automatically assign itself to stacks and is used to trigger stacks directly from the `cloudposse/github-action-atmos-affected-trigger-spacelift` GitHub action +# This is only used if said GitHub action is set to trigger on "comments" +"GIT_PUSH Plan Affected": + type: GIT_PUSH + labels: + - autoattach:pr-comment-triggered + body: | + package spacelift + + # This policy runs whenever a comment is added to a pull request. It looks for the comment body to contain either: + # /spacelift preview input.stack.id + # /spacelift deploy input.stack.id + # + # If the comment matches those patterns it will queue a tracked run (deploy) or a proposed run (preview). In the case of + # a proposed run, it will also cancel all of the other pending runs for the same branch. + # + # This is being used on conjunction with the GitHub actions `atmos-trigger-spacelift-feature-branch.yaml` and + # `atmos-trigger-spacelift-main-branch.yaml` in .github/workflows to automatically trigger a preview or deploy run based + # on the `atmos describe affected` output. + + track { + commented + contains(input.pull_request.comment, concat(" ", ["/spacelift", "deploy", input.stack.id])) + } + + propose { + commented + contains(input.pull_request.comment, concat(" ", ["/spacelift", "preview", input.stack.id])) + } + + # Ignore if the event is not a comment + ignore { + not commented + } + + # Ignore if the PR has a `spacelift-no-trigger` label + ignore { + input.pull_request.labels[_] = "spacelift-no-trigger" + } + + # Ignore if the PR is a draft and deesnt have a `spacelift-trigger` label + ignore { + input.pull_request.draft + not has_spacelift_trigger_label + } + + has_spacelift_trigger_label { + input.pull_request.labels[_] == "spacelift-trigger" + } + + commented { + input.pull_request.action == "commented" + } + + cancel[run.id] { + run := input.in_progress[_] + run.type == "PROPOSED" + run.state == "QUEUED" + run.branch == input.pull_request.head.branch + } + + # This is a random sample of 10% of the runs + sample { + millis := round(input.request.timestamp_ns / 1e6) + millis % 100 <= 10 + } +``` -1. Create a new branch & make changes -2. Create a new pull request (targeting the `main` branch) -3. View the modified resources directly in the pull request -4. View the successful Spacelift checks in the pull request -5. 
Merge the pull request and check the Spacelift job +This policy will automatically attach itself to _all_ components that have the `pr-comment-triggered` label, already +defined in `stacks/orgs/NAMESPACE/_defaults.yaml` under `settings.spacelift.labels`. +Next, create two new GitHub Action workflows: -## spacectl +```diff ++ .github/workflows/atmos-trigger-spacelift-feature-branch.yaml ++ .github/workflows/atmos-trigger-spacelift-main-branch.yaml +``` -See docs https://github.com/spaceone-dev/spacectl +The feature branch workflow will create a comment event in Spacelift to run a Proposed run for a given stack. Whereas +the main branch workflow will create a comment event in Spacelift to run a Deploy run for those same stacks. -### Install +#### Feature Branch -``` -β¨  apt install -y spacectl -qq +```yaml +name: "Plan Affected Spacelift Stacks" + +on: + pull_request: + types: + - opened + - synchronize + - reopened + branches: + - main + +jobs: + context: + runs-on: ["self-hosted"] + steps: + - name: Atmos Affected Stacks Trigger Spacelift + uses: cloudposse/github-action-atmos-affected-trigger-spacelift@v1 + with: + atmos-config-path: ./rootfs/usr/local/etc/atmos + github-token: ${{ secrets.GITHUB_TOKEN }} ``` -Setup a profile +This will add a GitHub comment such as: ``` -β¨  spacectl profile login gbl-identity -Enter Spacelift endpoint (eg. https://unicorn.app.spacelift.io/): https://.app.spacelift.io -Select credentials type: 1 for API key, 2 for GitHub access token: 1 -Enter API key ID: 01FKN... -Enter API key secret: +/spacelift preview plat-ue1-sandbox-foobar ``` -### Listing stacks +#### Main Branch -```bash -spacectl stack list +```yaml +name: "Deploy Affected Spacelift Stacks" + +on: + pull_request: + types: [closed] + branches: + - main + +jobs: + run: + if: github.event.pull_request.merged == true + runs-on: ["self-hosted"] + steps: + - name: Atmos Affected Stacks Trigger Spacelift + uses: cloudposse/github-action-atmos-affected-trigger-spacelift@v1 + with: + atmos-config-path: ./rootfs/usr/local/etc/atmos + deploy: true + github-token: ${{ secrets.GITHUB_TOKEN }} + head-ref: ${{ github.sha }}~1 ``` -Grab all the stack ids (use the JSON output to avoid bad chars) +This will add a GitHub comment such as: -```bash -spacectl stack list --output json | jq -r '.[].id' > stacks.txt ``` - -If the latest commit for each stack is desired, run something like this. - -NOTE: remove the `echo` to remove the dry-run functionality - -```bash -cat stacks.txt | while read stack; do echo $stack && echo spacectl stack set-current-commit --sha 25dd359749cfe30c76cce19f58e0a33555256afd --id $stack; done +/spacelift deploy plat-ue1-sandbox-foobar ``` - - - -## Requirements - -| Name | Version | -|------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.0 | -| [spacelift](#requirement\_spacelift) | >= 0.1.29 | -| [utils](#requirement\_utils) | >= 1.3.0, != 1.4.0 | - -## Providers - -No providers. - -## Modules - -| Name | Source | Version | -|------|--------|---------| -| [spacelift](#module\_spacelift) | cloudposse/cloud-infrastructure-automation/spacelift | 0.49.5 | -| [this](#module\_this) | cloudposse/label/null | 0.25.0 | - -## Resources - -No resources. - -## Inputs - -| Name | Description | Type | Default | Required | -|------|-------------|------|---------|:--------:| -| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | -| [administrative\_stack\_drift\_detection\_enabled](#input\_administrative\_stack\_drift\_detection\_enabled) | Flag to enable/disable administrative stack drift detection | `bool` | `true` | no | -| [administrative\_stack\_drift\_detection\_reconcile](#input\_administrative\_stack\_drift\_detection\_reconcile) | Flag to enable/disable administrative stack drift automatic reconciliation. If drift is detected and `reconcile` is turned on, Spacelift will create a tracked run to correct the drift | `bool` | `true` | no | -| [administrative\_stack\_drift\_detection\_schedule](#input\_administrative\_stack\_drift\_detection\_schedule) | List of cron expressions to schedule drift detection for the administrative stack | `list(string)` |
[
"0 4 * * *"
]
| no | -| [administrative\_trigger\_policy\_enabled](#input\_administrative\_trigger\_policy\_enabled) | Flag to enable/disable the global administrative trigger policy | `bool` | `true` | no | -| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | -| [autodeploy](#input\_autodeploy) | Default autodeploy value for all stacks created by this project | `bool` | n/a | yes | -| [aws\_role\_arn](#input\_aws\_role\_arn) | ARN of the AWS IAM role to assume and put its temporary credentials in the runtime environment | `string` | `null` | no | -| [aws\_role\_enabled](#input\_aws\_role\_enabled) | Flag to enable/disable Spacelift to use AWS STS to assume the supplied IAM role and put its temporary credentials in the runtime environment | `bool` | `false` | no | -| [aws\_role\_external\_id](#input\_aws\_role\_external\_id) | Custom external ID (works only for private workers). See https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html for more details | `string` | `null` | no | -| [aws\_role\_generate\_credentials\_in\_worker](#input\_aws\_role\_generate\_credentials\_in\_worker) | Flag to enable/disable generating AWS credentials in the private worker after assuming the supplied IAM role | `bool` | `false` | no | -| [before\_init](#input\_before\_init) | List of before-init scripts | `list(string)` | `[]` | no | -| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | -| [context\_filters](#input\_context\_filters) | Context filters to create stacks for specific context information. Valid lists are `namespaces`, `environments`, `tenants`, `stages`. | `map(list(string))` | `{}` | no | -| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | -| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | -| [drift\_detection\_enabled](#input\_drift\_detection\_enabled) | Flag to enable/disable drift detection on the infrastructure stacks | `bool` | `true` | no | -| [drift\_detection\_reconcile](#input\_drift\_detection\_reconcile) | Flag to enable/disable infrastructure stacks drift automatic reconciliation. If drift is detected and `reconcile` is turned on, Spacelift will create a tracked run to correct the drift | `bool` | `true` | no | -| [drift\_detection\_schedule](#input\_drift\_detection\_schedule) | List of cron expressions to schedule drift detection for the infrastructure stacks | `list(string)` |
[
"0 4 * * *"
]
| no | -| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | -| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [external\_execution](#input\_external\_execution) | Set this to true if you're calling this module from outside of a Spacelift stack (e.g. the `complete` example) | `bool` | `false` | no | -| [git\_branch](#input\_git\_branch) | The Git branch name | `string` | `"main"` | no | -| [git\_commit\_sha](#input\_git\_commit\_sha) | The commit SHA for which to trigger a run. Requires `var.spacelift_run_enabled` to be set to `true` | `string` | `null` | no | -| [git\_repository](#input\_git\_repository) | The Git repository name | `string` | n/a | yes | -| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [infracost\_enabled](#input\_infracost\_enabled) | Flag to enable/disable infracost. If this is enabled, it will add infracost label to each stack. See [spacelift infracost](https://docs.spacelift.io/vendors/terraform/infracost) docs for more details. | `bool` | `false` | no | -| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | -| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | -| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | -| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | -| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | -| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | -| [policies\_available](#input\_policies\_available) | List of available default policies to create in Spacelift (these policies will not be attached to Spacelift stacks by default, use `var.policies_enabled`) | `list(string)` |
[
"git_push.proposed-run",
"git_push.tracked-run",
"plan.default",
"trigger.dependencies",
"trigger.retries"
]
| no | -| [policies\_by\_id\_enabled](#input\_policies\_by\_id\_enabled) | List of existing policy IDs to attach to all Spacelift stacks. These policies must already exist in Spacelift | `list(string)` | `[]` | no | -| [policies\_by\_name\_enabled](#input\_policies\_by\_name\_enabled) | List of existing policy names to attach to all Spacelift stacks. These policies must exist in `modules/spacelift/rego-policies` | `list(string)` | `[]` | no | -| [policies\_enabled](#input\_policies\_enabled) | DEPRECATED: Use `policies_by_id_enabled` instead. List of default policies created by this stack to attach to all Spacelift stacks | `list(string)` | `[]` | no | -| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | -| [region](#input\_region) | AWS Region | `string` | n/a | yes | -| [runner\_image](#input\_runner\_image) | Full address & tag of the Spacelift runner image (e.g. on ECR) | `string` | n/a | yes | -| [spacelift\_component\_path](#input\_spacelift\_component\_path) | The Spacelift Component Path | `string` | `"components/terraform"` | no | -| [spacelift\_run\_enabled](#input\_spacelift\_run\_enabled) | Enable/disable creation of the `spacelift_run` resource | `bool` | `false` | no | -| [stack\_config\_path\_template](#input\_stack\_config\_path\_template) | Stack config path template | `string` | `"stacks/%s.yaml"` | no | -| [stack\_destructor\_enabled](#input\_stack\_destructor\_enabled) | Flag to enable/disable the stack destructor to destroy the resources of a stack before deleting the stack itself | `bool` | `false` | no | -| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | -| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | -| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | -| [terraform\_version](#input\_terraform\_version) | Default Terraform version for all stacks created by this project | `string` | n/a | yes | -| [terraform\_version\_map](#input\_terraform\_version\_map) | A map to determine which Terraform patch version to use for each minor version | `map(string)` | `{}` | no | -| [worker\_pool\_id](#input\_worker\_pool\_id) | DEPRECATED: Use worker\_pool\_name\_id\_map instead. Worker pool ID | `string` | `""` | no | -| [worker\_pool\_name\_id\_map](#input\_worker\_pool\_name\_id\_map) | Map of worker pool names to worker pool IDs | `map(any)` | `{}` | no | - -## Outputs - -| Name | Description | -|------|-------------| -| [stacks](#output\_stacks) | Spacelift stacks | - - -## References - -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/spacelift) - Cloud Posse's upstream component - -[](https://cpco.io/component) diff --git a/modules/spacelift/admin-stack/README.md b/modules/spacelift/admin-stack/README.md new file mode 100644 index 000000000..e48e910b3 --- /dev/null +++ b/modules/spacelift/admin-stack/README.md @@ -0,0 +1,278 @@ +--- +tags: + - component/spacelift/admin-stack + - layer/spacelift + - provider/aws + - provider/spacelift +--- + +# Component: `spacelift/admin-stack` + +This component is responsible for creating an administrative [stack](https://docs.spacelift.io/concepts/stack/) and its +corresponding child stacks in the Spacelift organization. + +The component uses a series of `context_filters` to select atmos component instances to manage as child stacks. + +## Usage + +**Stack Level**: Global + +The following are example snippets of how to use this component. For more on Spacelift admin stack usage, see the +[Spacelift README](https://docs.cloudposse.com/components/library/aws/spacelift/) + +First define the default configuration for any admin stack: + +```yaml +# stacks/catalog/spacelift/admin-stack.yaml +components: + terraform: + admin-stack/default: + metadata: + type: abstract + component: spacelift/admin-stack + settings: + spacelift: + administrative: true + autodeploy: true + before_apply: + - spacelift-configure-paths + before_init: + - spacelift-configure-paths + - spacelift-write-vars + - spacelift-tf-workspace + before_plan: + - spacelift-configure-paths + drift_detection_enabled: true + drift_detection_reconcile: true + drift_detection_schedule: + - 0 4 * * * + manage_state: false + policies: {} + vars: + # Organization specific configuration + branch: main + repository: infrastructure + worker_pool_name: "acme-core-ue1-auto-spacelift-worker-pool" + runner_image: 111111111111.dkr.ecr.us-east-1.amazonaws.com/infrastructure:latest + spacelift_spaces_stage_name: "root" + # These values need to be manually updated as external configuration changes + # This should match the version set in the Dockerfile and be updated when the version changes. 
+ terraform_version: "1.3.6" + # Common configuration + administrative: true # Whether this stack can manage other stacks + component_root: components/terraform +``` + +Then define the root-admin stack: + +```yaml +# stacks/orgs/acme/spacelift.yaml +import: + - mixins/region/global-region + - orgs/acme/_defaults + - catalog/terraform/spacelift/admin-stack + - catalog/terraform/spacelift/spaces + +# These intentionally overwrite the default values +vars: + tenant: root + environment: gbl + stage: spacelift + +components: + terraform: + # This admin stack creates other "admin" stacks + admin-stack: + metadata: + component: spacelift/admin-stack + inherits: + - admin-stack/default + settings: + spacelift: + root_administrative: true + labels: + - root-admin + - admin + vars: + enabled: true + root_admin_stack: true # This stack will be created in the root space and will create all the other admin stacks as children. + context_filters: # context_filters determine which child stacks to manage with this admin stack + administrative: true # This stack is managing all the other admin stacks + root_administrative: false # We don't want this stack to also find itself in the config and add itself a second time + labels: + - admin + # attachments only on the root stack + root_stack_policy_attachments: + - TRIGGER Global administrator + # this creates policies for the children (admin) stacks + child_policy_attachments: + - TRIGGER Global administrator +``` + +Finally, define any tenant-specific stacks: + +```yaml +# stacks/orgs/acme/core/spacelift.yaml +import: + - mixins/region/global-region + - orgs/acme/core/_defaults + - catalog/terraform/spacelift/admin-stack + +vars: + tenant: core + environment: gbl + stage: spacelift + +components: + terraform: + admin-stack: + metadata: + component: spacelift/admin-stack + inherits: + - admin-stack/default + settings: + spacelift: + labels: # Additional labels for this stack + - admin-stack-name:core + vars: + enabled: true + context_filters: + tenants: ["core"] + labels: # Additional labels added to all children + - admin-stack-name:core # will be used to automatically create the `managed-by:stack-name` label + child_policy_attachments: + - TRIGGER Dependencies +``` + + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3 | +| [aws](#requirement\_aws) | >= 4.0 | +| [null](#requirement\_null) | >= 3.0 | +| [spacelift](#requirement\_spacelift) | >= 0.1.31 | +| [utils](#requirement\_utils) | >= 1.14.0 | + +## Providers + +| Name | Version | +|------|---------| +| [null](#provider\_null) | >= 3.0 | +| [spacelift](#provider\_spacelift) | >= 0.1.31 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [all\_admin\_stacks\_config](#module\_all\_admin\_stacks\_config) | cloudposse/cloud-infrastructure-automation/spacelift//modules/spacelift-stacks-from-atmos-config | 1.5.0 | +| [child\_stack](#module\_child\_stack) | cloudposse/cloud-infrastructure-automation/spacelift//modules/spacelift-stack | 1.6.0 | +| [child\_stacks\_config](#module\_child\_stacks\_config) | cloudposse/cloud-infrastructure-automation/spacelift//modules/spacelift-stacks-from-atmos-config | 1.5.0 | +| [root\_admin\_stack](#module\_root\_admin\_stack) | cloudposse/cloud-infrastructure-automation/spacelift//modules/spacelift-stack | 1.6.0 | +| [root\_admin\_stack\_config](#module\_root\_admin\_stack\_config) | cloudposse/cloud-infrastructure-automation/spacelift//modules/spacelift-stacks-from-atmos-config | 1.5.0 | +| 
[spaces](#module\_spaces) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +| Name | Type | +|------|------| +| [null_resource.child_stack_parent_precondition](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.public_workers_precondition](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.spaces_precondition](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.workers_precondition](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [spacelift_policy_attachment.root](https://registry.terraform.io/providers/spacelift-io/spacelift/latest/docs/resources/policy_attachment) | resource | +| [spacelift_policies.this](https://registry.terraform.io/providers/spacelift-io/spacelift/latest/docs/data-sources/policies) | data source | +| [spacelift_stacks.this](https://registry.terraform.io/providers/spacelift-io/spacelift/latest/docs/data-sources/stacks) | data source | +| [spacelift_worker_pools.this](https://registry.terraform.io/providers/spacelift-io/spacelift/latest/docs/data-sources/worker_pools) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [admin\_stack\_label](#input\_admin\_stack\_label) | Label to use to identify the admin stack when creating the child stacks | `string` | `"admin-stack-name"` | no | +| [allow\_public\_workers](#input\_allow\_public\_workers) | Whether to allow public workers to be used for this stack | `bool` | `false` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [autodeploy](#input\_autodeploy) | Controls the Spacelift 'autodeploy' option for a stack | `bool` | `false` | no | +| [autoretry](#input\_autoretry) | Controls the Spacelift 'autoretry' option for a stack | `bool` | `false` | no | +| [aws\_role\_arn](#input\_aws\_role\_arn) | ARN of the AWS IAM role to assume and put its temporary credentials in the runtime environment | `string` | `null` | no | +| [aws\_role\_enabled](#input\_aws\_role\_enabled) | Flag to enable/disable Spacelift to use AWS STS to assume the supplied IAM role and put its temporary credentials in the runtime environment | `bool` | `false` | no | +| [aws\_role\_external\_id](#input\_aws\_role\_external\_id) | Custom external ID (works only for private workers). See https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html for more details | `string` | `null` | no | +| [aws\_role\_generate\_credentials\_in\_worker](#input\_aws\_role\_generate\_credentials\_in\_worker) | Flag to enable/disable generating AWS credentials in the private worker after assuming the supplied IAM role | `bool` | `true` | no | +| [azure\_devops](#input\_azure\_devops) | Azure DevOps VCS settings | `map(any)` | `null` | no | +| [bitbucket\_cloud](#input\_bitbucket\_cloud) | Bitbucket Cloud VCS settings | `map(any)` | `null` | no | +| [bitbucket\_datacenter](#input\_bitbucket\_datacenter) | Bitbucket Datacenter VCS settings | `map(any)` | `null` | no | +| [branch](#input\_branch) | Specify which branch to use within your infrastructure repo | `string` | `"main"` | no | +| [child\_policy\_attachments](#input\_child\_policy\_attachments) | List of policy attachments to attach to the child stacks created by this module | `set(string)` | `[]` | no | +| [cloudformation](#input\_cloudformation) | CloudFormation-specific configuration. Presence means this Stack is a CloudFormation Stack. | `map(any)` | `null` | no | +| [commit\_sha](#input\_commit\_sha) | The commit SHA for which to trigger a run. Requires `var.spacelift_run_enabled` to be set to `true` | `string` | `null` | no | +| [component\_env](#input\_component\_env) | Map of component ENV variables | `any` | `{}` | no | +| [component\_root](#input\_component\_root) | The path, relative to the root of the repository, where the component can be found | `string` | n/a | yes | +| [component\_vars](#input\_component\_vars) | All Terraform values to be applied to the stack via a mounted file | `any` | `{}` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [context\_attachments](#input\_context\_attachments) | A list of context IDs to attach to this stack | `list(string)` | `[]` | no | +| [context\_filters](#input\_context\_filters) | Context filters to select atmos stacks matching specific criteria to create as children. |
object({
namespaces = optional(list(string), [])
environments = optional(list(string), [])
tenants = optional(list(string), [])
stages = optional(list(string), [])
tags = optional(map(string), {})
administrative = optional(bool)
root_administrative = optional(bool)
})
| n/a | yes | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [description](#input\_description) | Specify description of stack | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [drift\_detection\_enabled](#input\_drift\_detection\_enabled) | Flag to enable/disable drift detection on the infrastructure stacks | `bool` | `false` | no | +| [drift\_detection\_reconcile](#input\_drift\_detection\_reconcile) | Flag to enable/disable infrastructure stacks drift automatic reconciliation. If drift is detected and `reconcile` is turned on, Spacelift will create a tracked run to correct the drift | `bool` | `false` | no | +| [drift\_detection\_schedule](#input\_drift\_detection\_schedule) | List of cron expressions to schedule drift detection for the infrastructure stacks | `list(string)` |
[
"0 4 * * *"
]
| no | +| [drift\_detection\_timezone](#input\_drift\_detection\_timezone) | Timezone in which the schedule is expressed. Defaults to UTC. | `string` | `null` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [excluded\_context\_filters](#input\_excluded\_context\_filters) | Context filters to exclude from stacks matching specific criteria of `var.context_filters`. |
object({
namespaces = optional(list(string), [])
environments = optional(list(string), [])
tenants = optional(list(string), [])
stages = optional(list(string), [])
tags = optional(map(string), {})
})
| `{}` | no | +| [github\_enterprise](#input\_github\_enterprise) | GitHub Enterprise (self-hosted) VCS settings | `map(any)` | `null` | no | +| [gitlab](#input\_gitlab) | GitLab VCS settings | `map(any)` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels](#input\_labels) | A list of labels for the stack | `list(string)` | `[]` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [local\_preview\_enabled](#input\_local\_preview\_enabled) | Indicates whether local preview runs can be triggered on this Stack | `bool` | `false` | no | +| [manage\_state](#input\_manage\_state) | Flag to enable/disable manage\_state setting in stack | `bool` | `false` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [protect\_from\_deletion](#input\_protect\_from\_deletion) | Flag to enable/disable deletion protection. | `bool` | `false` | no | +| [pulumi](#input\_pulumi) | Pulumi-specific configuration. Presence means this Stack is a Pulumi Stack. | `map(any)` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [repository](#input\_repository) | The name of your infrastructure repo | `string` | n/a | yes | +| [root\_admin\_stack](#input\_root\_admin\_stack) | Flag to indicate if this stack is the root admin stack. In this case, the stack will be created in the root space and will create all the other admin stacks as children. | `bool` | `false` | no | +| [root\_stack\_policy\_attachments](#input\_root\_stack\_policy\_attachments) | List of policy attachments to attach to the root admin stack | `set(string)` | `[]` | no | +| [runner\_image](#input\_runner\_image) | The full image name and tag of the Docker image to use in Spacelift | `string` | `null` | no | +| [showcase](#input\_showcase) | Showcase settings | `map(any)` | `null` | no | +| [space\_id](#input\_space\_id) | Place the stack in the specified space\_id | `string` | `"root"` | no | +| [spacelift\_run\_enabled](#input\_spacelift\_run\_enabled) | Enable/disable creation of the `spacelift_run` resource | `bool` | `false` | no | +| [spacelift\_spaces\_component\_name](#input\_spacelift\_spaces\_component\_name) | The component name of the spacelift spaces component | `string` | `"spacelift/spaces"` | no | +| [spacelift\_spaces\_environment\_name](#input\_spacelift\_spaces\_environment\_name) | The environment name of the spacelift spaces component | `string` | `null` | no | +| [spacelift\_spaces\_stage\_name](#input\_spacelift\_spaces\_stage\_name) | The stage name of the spacelift spaces component | `string` | `null` | no | +| [spacelift\_spaces\_tenant\_name](#input\_spacelift\_spaces\_tenant\_name) | The tenant name of the spacelift spaces component | `string` | `null` | no | +| [spacelift\_stack\_dependency\_enabled](#input\_spacelift\_stack\_dependency\_enabled) | If enabled, the `spacelift_stack_dependency` Spacelift resource will be used to create dependencies between stacks instead of using the `depends-on` labels. The `depends-on` labels will be removed from the stacks and the trigger policies for dependencies will be detached | `bool` | `false` | no | +| [stack\_destructor\_enabled](#input\_stack\_destructor\_enabled) | Flag to enable/disable the stack destructor to destroy the resources of the stack before deleting the stack itself | `bool` | `false` | no | +| [stack\_name](#input\_stack\_name) | The name of the Spacelift stack | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [terraform\_smart\_sanitization](#input\_terraform\_smart\_sanitization) | Whether or not to enable [Smart Sanitization](https://docs.spacelift.io/vendors/terraform/resource-sanitization) which will only sanitize values marked as sensitive. | `bool` | `false` | no | +| [terraform\_version](#input\_terraform\_version) | Specify the version of Terraform to use for the stack | `string` | `null` | no | +| [terraform\_version\_map](#input\_terraform\_version\_map) | A map to determine which Terraform patch version to use for each minor version | `map(string)` | `{}` | no | +| [terraform\_workflow\_tool](#input\_terraform\_workflow\_tool) | Defines the tool that will be used to execute the workflow. This can be one of OPEN\_TOFU, TERRAFORM\_FOSS or CUSTOM. Defaults to TERRAFORM\_FOSS. | `string` | `"TERRAFORM_FOSS"` | no | +| [terraform\_workspace](#input\_terraform\_workspace) | Specify the Terraform workspace to use for the stack | `string` | `null` | no | +| [webhook\_enabled](#input\_webhook\_enabled) | Flag to enable/disable the webhook endpoint to which Spacelift sends the POST requests about run state changes | `bool` | `false` | no | +| [webhook\_endpoint](#input\_webhook\_endpoint) | Webhook endpoint to which Spacelift sends the POST requests about run state changes | `string` | `null` | no | +| [webhook\_secret](#input\_webhook\_secret) | Webhook secret used to sign each POST request so you're able to verify that the requests come from Spacelift | `string` | `null` | no | +| [worker\_pool\_name](#input\_worker\_pool\_name) | The atmos stack name of the worker pool. Example: `acme-core-ue2-auto-spacelift-default-worker-pool` | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [child\_stacks](#output\_child\_stacks) | All children stacks managed by this component | +| [root\_stack](#output\_root\_stack) | The root stack, if enabled and created by this component | +| [root\_stack\_id](#output\_root\_stack\_id) | The stack id | + + diff --git a/modules/spacelift/admin-stack/child-stacks.tf b/modules/spacelift/admin-stack/child-stacks.tf new file mode 100644 index 000000000..0d094e2e2 --- /dev/null +++ b/modules/spacelift/admin-stack/child-stacks.tf @@ -0,0 +1,167 @@ +locals { + child_stacks = { + for k, v in module.child_stacks_config.spacelift_stacks : k => v + if local.enabled == true && + try(v.settings.spacelift.workspace_enabled, false) == true + } + + child_stack_policies = { + for k, v in module.all_admin_stacks_config.spacelift_stacks : k => v.vars.child_policy_attachments + if local.enabled == true && + try(v.settings.spacelift.workspace_enabled, false) == true && + try(v.vars.child_policy_attachments, null) != null + } + + child_policies = local.create_root_admin_stack ? var.child_policy_attachments : try(local.child_stack_policies[local.managed_by], null) + child_policy_ids = try([for item in local.child_policies : local.policies[item]], []) + admin_stack_label = try([for item in var.labels : item if startswith(item, format("${var.admin_stack_label}:"))][0], null) + managed_by = local.create_root_admin_stack ? 
local.root_admin_stack_name : try(data.spacelift_stacks.this[0].stacks[0].name, null) +} + +data "spacelift_stacks" "this" { + count = ( + local.enabled && + local.create_root_admin_stack == false && + local.admin_stack_label != null + ) ? 1 : 0 + labels { + any_of = [local.admin_stack_label] + } +} + +# Ensure no stacks are configured to use public workers if they are not allowed +resource "null_resource" "child_stack_parent_precondition" { + count = local.enabled ? 1 : 0 + lifecycle { + precondition { + condition = local.create_root_admin_stack ? true : length(data.spacelift_stacks.this[0].stacks) > 0 + error_message = "Please apply this stack's parent before applying this stack." + } + } +} + +# Get all of the stack configurations from the atmos config that matched the context_filters and create a stack +# for each one. +module "child_stacks_config" { + source = "cloudposse/cloud-infrastructure-automation/spacelift//modules/spacelift-stacks-from-atmos-config" + version = "1.5.0" + + context_filters = var.context_filters + excluded_context_filters = var.excluded_context_filters + + context = module.this.context +} + +module "child_stack" { + source = "cloudposse/cloud-infrastructure-automation/spacelift//modules/spacelift-stack" + version = "1.6.0" + + for_each = local.child_stacks + + # Only the following attributes are available in `each.value` + # component, base_component, stack, imports, deps, deps_all, vars, settings, env, inheritance, metadata, backend_type, backend, workspace, labels + # They are in the outputs from the module https://github.com/cloudposse/terraform-spacelift-cloud-infrastructure-automation/tree/main/modules/spacelift-stacks-from-atmos-config + # The rest are configured in `settings.spacelift` or `vars` for each component, and should be accessed by `each.value.settings.spacelift` and `each.value.vars` + + atmos_stack_name = try(each.value.stack, null) + component_env = try(each.value.env, var.component_env) + component_name = try(each.value.component, null) + component_root = try(join("/", [var.component_root, try(each.value.metadata.component, each.value.component)])) + component_vars = try(each.value.vars, var.component_vars) + terraform_workspace = try(each.value.workspace, var.terraform_workspace) + + labels = concat( + try(each.value.labels, []), + try(each.value.vars.labels, []), + ["managed-by:${local.managed_by}"], + local.create_root_admin_stack ? 
["depends-on:${local.root_admin_stack_name}", ""] : [] + ) + + administrative = try(each.value.settings.spacelift.administrative, false) + after_apply = try(each.value.settings.spacelift.after_apply, []) + after_destroy = try(each.value.settings.spacelift.after_destroy, []) + after_init = try(each.value.settings.spacelift.after_init, []) + after_perform = try(each.value.settings.spacelift.after_perform, []) + after_plan = try(each.value.settings.spacelift.after_plan, []) + autodeploy = try(each.value.settings.spacelift.autodeploy, var.autodeploy) + autoretry = try(each.value.settings.spacelift.autoretry, var.autoretry) + aws_role_enabled = try(each.value.settings.spacelift.aws_role_enabled, var.aws_role_enabled) + aws_role_arn = try(each.value.settings.spacelift.aws_role_arn, var.aws_role_arn) + aws_role_external_id = try(each.value.settings.spacelift.aws_role_external_id, var.aws_role_external_id) + aws_role_generate_credentials_in_worker = try(each.value.settings.spacelift.aws_role_generate_credentials_in_worker, var.aws_role_generate_credentials_in_worker) + before_apply = try(each.value.settings.spacelift.before_apply, []) + before_destroy = try(each.value.settings.spacelift.before_destroy, []) + before_init = try(each.value.settings.spacelift.before_init, []) + before_perform = try(each.value.settings.spacelift.before_perform, []) + before_plan = try(each.value.settings.spacelift.before_plan, []) + branch = try(each.value.settings.spacelift.branch, var.branch) + commit_sha = var.commit_sha != null ? var.commit_sha : try(each.value.settings.spacelift.commit_sha, null) + context_attachments = try(each.value.settings.spacelift.context_attachments, var.context_attachments) + description = try(each.value.settings.spacelift.description, var.description) + drift_detection_enabled = try(each.value.settings.spacelift.drift_detection_enabled, var.drift_detection_enabled) + drift_detection_reconcile = try(each.value.settings.spacelift.drift_detection_reconcile, var.drift_detection_reconcile) + drift_detection_schedule = try(each.value.settings.spacelift.drift_detection_schedule, var.drift_detection_schedule) + drift_detection_timezone = try(each.value.settings.spacelift.drift_detection_timezone, var.drift_detection_timezone) + local_preview_enabled = try(each.value.settings.spacelift.local_preview_enabled, var.local_preview_enabled) + manage_state = try(each.value.settings.spacelift.manage_state, var.manage_state) + policy_ids = try(concat(each.value.settings.spacelift.policies, local.child_policy_ids), local.child_policy_ids, []) + protect_from_deletion = try(each.value.settings.spacelift.protect_from_deletion, var.protect_from_deletion) + repository = var.repository + runner_image = try(each.value.settings.spacelift.runner_image, var.runner_image) + spacelift_run_enabled = try(each.value.settings.spacelift.spacelift_run_enabled, var.spacelift_run_enabled) + spacelift_stack_dependency_enabled = try(each.value.settings.spacelift.spacelift_stack_dependency_enabled, var.spacelift_stack_dependency_enabled) + stack_destructor_enabled = try(each.value.settings.spacelift.stack_destructor_enabled, var.stack_destructor_enabled) + stack_name = try(each.value.settings.spacelift.stack_name, each.key) + terraform_smart_sanitization = try(each.value.settings.spacelift.terraform_smart_sanitization, var.terraform_smart_sanitization) + terraform_version = lookup(var.terraform_version_map, try(each.value.settings.spacelift.terraform_version, ""), var.terraform_version) + terraform_workflow_tool = 
try(each.value.settings.spacelift.terraform_workflow_tool, var.terraform_workflow_tool) + webhook_enabled = try(each.value.settings.spacelift.webhook_enabled, var.webhook_enabled) + webhook_endpoint = try(each.value.settings.spacelift.webhook_endpoint, var.webhook_endpoint) + webhook_secret = try(each.value.settings.spacelift.webhook_secret, var.webhook_secret) + worker_pool_id = try(local.worker_pools[each.value.settings.spacelift.worker_pool_name], local.worker_pools[var.worker_pool_name], null) + + azure_devops = try(each.value.settings.spacelift.azure_devops, var.azure_devops) + bitbucket_cloud = try(each.value.settings.spacelift.bitbucket_cloud, var.bitbucket_cloud) + bitbucket_datacenter = try(each.value.settings.spacelift.bitbucket_datacenter, var.bitbucket_datacenter) + cloudformation = try(each.value.settings.spacelift.cloudformation, var.cloudformation) + github_enterprise = try(each.value.settings.spacelift.github_enterprise, var.github_enterprise) + gitlab = try(each.value.settings.spacelift.gitlab, var.gitlab) + pulumi = try(each.value.settings.spacelift.pulumi, var.pulumi) + showcase = try(each.value.settings.spacelift.showcase, var.showcase) + + # Process `spacelift.space_name` and `spacelift.space_name_pattern` + space_id = local.spaces[ + try( + coalesce( + # if `space_name` is specified, use it + each.value.settings.spacelift.space_name, + # otherwise, try to replace the context tokens in `space_name_template` and use it + # `space_name_template` accepts the following context tokens: {namespace}, {tenant}, {environment}, {stage} + each.value.settings.spacelift.space_name_pattern != "" && each.value.settings.spacelift.space_name_pattern != null ? ( + replace( + replace( + replace( + replace( + each.value.settings.spacelift.space_name_pattern, + "{namespace}", module.this.namespace + ), + "{tenant}", module.this.tenant + ), + "{environment}", module.this.environment + ), + "{stage}", module.this.stage) + ) : "" + ), + var.space_id + ) + ] + + depends_on = [ + null_resource.spaces_precondition, + null_resource.workers_precondition, + spacelift_policy_attachment.root, + null_resource.child_stack_parent_precondition + ] + + context = module.this.context +} diff --git a/modules/spacelift/admin-stack/context.tf b/modules/spacelift/admin-stack/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/spacelift/admin-stack/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). 
+# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. 
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." 
+ } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/spacelift/admin-stack/main.tf b/modules/spacelift/admin-stack/main.tf new file mode 100644 index 000000000..03bce1f8f --- /dev/null +++ b/modules/spacelift/admin-stack/main.tf @@ -0,0 +1,11 @@ +locals { + enabled = module.this.enabled + create_root_admin_stack = local.enabled && var.root_admin_stack + root_admin_stack_name = local.create_root_admin_stack ? keys(module.root_admin_stack_config.spacelift_stacks)[0] : null + root_admin_stack_config = local.create_root_admin_stack ? module.root_admin_stack_config.spacelift_stacks[local.root_admin_stack_name] : null + + # Create a map of all the policies {policy_name = policy_id} + policies = { for k, v in data.spacelift_policies.this.policies : v.name => v.id } +} + +data "spacelift_policies" "this" {} diff --git a/modules/spacelift/admin-stack/outputs.tf b/modules/spacelift/admin-stack/outputs.tf new file mode 100644 index 000000000..446281515 --- /dev/null +++ b/modules/spacelift/admin-stack/outputs.tf @@ -0,0 +1,16 @@ +output "root_stack_id" { + description = "The stack id" + value = local.enabled && local.create_root_admin_stack ? module.root_admin_stack.id : "" +} + +output "root_stack" { + description = "The root stack, if enabled and created by this component" + value = local.enabled && local.create_root_admin_stack ? module.root_admin_stack : null + sensitive = true +} + +output "child_stacks" { + description = "All children stacks managed by this component" + value = local.enabled ? 
values(module.child_stack)[*] : [] + sensitive = true +} diff --git a/modules/spacelift/admin-stack/providers.tf b/modules/spacelift/admin-stack/providers.tf new file mode 100644 index 000000000..c95d53819 --- /dev/null +++ b/modules/spacelift/admin-stack/providers.tf @@ -0,0 +1 @@ +provider "spacelift" {} diff --git a/modules/spacelift/admin-stack/remote-state.tf b/modules/spacelift/admin-stack/remote-state.tf new file mode 100644 index 000000000..69834fa59 --- /dev/null +++ b/modules/spacelift/admin-stack/remote-state.tf @@ -0,0 +1,11 @@ +module "spaces" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.spacelift_spaces_component_name + environment = try(var.spacelift_spaces_environment_name, module.this.environment) + stage = try(var.spacelift_spaces_stage_name, module.this.stage) + tenant = try(var.spacelift_spaces_tenant_name, module.this.tenant) + + context = module.this.context +} diff --git a/modules/spacelift/admin-stack/root-admin-stack.tf b/modules/spacelift/admin-stack/root-admin-stack.tf new file mode 100644 index 000000000..2e2b4b38c --- /dev/null +++ b/modules/spacelift/admin-stack/root-admin-stack.tf @@ -0,0 +1,110 @@ +# The root admin stack is a special stack that is used to manage all of the other admin stacks in the the Spacelift +# organization. This stack is denoted by setting the root_administrative property to true in the atmos config. Only one +# such stack is allowed in the Spacelift organization. +module "root_admin_stack_config" { + source = "cloudposse/cloud-infrastructure-automation/spacelift//modules/spacelift-stacks-from-atmos-config" + version = "1.5.0" + + enabled = local.create_root_admin_stack + + context_filters = { + root_administrative = true + } +} + +# This gets the atmos stack config for all of the administrative stacks +module "all_admin_stacks_config" { + source = "cloudposse/cloud-infrastructure-automation/spacelift//modules/spacelift-stacks-from-atmos-config" + version = "1.5.0" + + enabled = local.create_root_admin_stack + + context_filters = { + administrative = true + } +} + +module "root_admin_stack" { + source = "cloudposse/cloud-infrastructure-automation/spacelift//modules/spacelift-stack" + version = "1.6.0" + + enabled = local.create_root_admin_stack + + # Only the following attributes are available in `local.root_admin_stack_config` + # component, base_component, stack, imports, deps, deps_all, vars, settings, env, inheritance, metadata, backend_type, backend, workspace, labels + # They are in the outputs from the module https://github.com/cloudposse/terraform-spacelift-cloud-infrastructure-automation/tree/main/modules/spacelift-stacks-from-atmos-config + # The rest are configured in `settings.spacelift` or `vars` for each component, and should be accessed by `each.value.settings.spacelift` and `each.value.vars` + + atmos_stack_name = try(local.root_admin_stack_config.stack, null) + component_env = try(local.root_admin_stack_config.env, var.component_env) + component_name = try(local.root_admin_stack_config.component, null) + component_root = try(join("/", [var.component_root, local.root_admin_stack_config.metadata.component]), null) + component_vars = try(local.root_admin_stack_config.vars, var.component_vars) + terraform_workspace = try(local.root_admin_stack_config.workspace, var.terraform_workspace) + labels = concat(try(local.root_admin_stack_config.labels, []), try(var.labels, [])) + + administrative = true + after_apply = 
try(local.root_admin_stack_config.settings.spacelift.after_apply, []) + after_destroy = try(local.root_admin_stack_config.settings.spacelift.after_destroy, []) + after_init = try(local.root_admin_stack_config.settings.spacelift.after_init, []) + after_perform = try(local.root_admin_stack_config.settings.spacelift.after_perform, []) + after_plan = try(local.root_admin_stack_config.settings.spacelift.after_plan, []) + autodeploy = try(local.root_admin_stack_config.settings.spacelift.autodeploy, var.autodeploy) + autoretry = try(local.root_admin_stack_config.settings.spacelift.autoretry, var.autoretry) + aws_role_enabled = try(local.root_admin_stack_config.settings.spacelift.aws_role_enabled, var.aws_role_enabled) + aws_role_arn = try(local.root_admin_stack_config.settings.spacelift.aws_role_arn, var.aws_role_arn) + aws_role_external_id = try(local.root_admin_stack_config.settings.spacelift.aws_role_external_id, var.aws_role_external_id) + aws_role_generate_credentials_in_worker = try(local.root_admin_stack_config.settings.spacelift.aws_role_generate_credentials_in_worker, var.aws_role_generate_credentials_in_worker) + before_apply = try(local.root_admin_stack_config.settings.spacelift.before_apply, []) + before_destroy = try(local.root_admin_stack_config.settings.spacelift.before_destroy, []) + before_init = try(local.root_admin_stack_config.settings.spacelift.before_init, []) + before_perform = try(local.root_admin_stack_config.settings.spacelift.before_perform, []) + before_plan = try(local.root_admin_stack_config.settings.spacelift.before_plan, []) + branch = try(local.root_admin_stack_config.settings.spacelift.branch, var.branch) + commit_sha = var.commit_sha != null ? var.commit_sha : try(local.root_admin_stack_config.settings.spacelift.commit_sha, null) + context_attachments = try(local.root_admin_stack_config.settings.spacelift.context_attachments, var.context_attachments) + description = try(local.root_admin_stack_config.settings.spacelift.description, var.description) + drift_detection_enabled = try(local.root_admin_stack_config.settings.spacelift.drift_detection_enabled, var.drift_detection_enabled) + drift_detection_reconcile = try(local.root_admin_stack_config.settings.spacelift.drift_detection_reconcile, var.drift_detection_reconcile) + drift_detection_schedule = try(local.root_admin_stack_config.settings.spacelift.drift_detection_schedule, var.drift_detection_schedule) + drift_detection_timezone = try(local.root_admin_stack_config.settings.spacelift.drift_detection_timezone, var.drift_detection_timezone) + local_preview_enabled = try(local.root_admin_stack_config.settings.spacelift.local_preview_enabled, var.local_preview_enabled) + manage_state = try(local.root_admin_stack_config.settings.spacelift.manage_state, var.manage_state) + protect_from_deletion = try(local.root_admin_stack_config.settings.spacelift.protect_from_deletion, var.protect_from_deletion) + repository = var.repository + runner_image = try(local.root_admin_stack_config.settings.spacelift.runner_image, var.runner_image) + space_id = var.space_id + spacelift_run_enabled = coalesce(try(local.root_admin_stack_config.settings.spacelift.spacelift_run_enabled, null), var.spacelift_run_enabled) + spacelift_stack_dependency_enabled = try(local.root_admin_stack_config.settings.spacelift.spacelift_stack_dependency_enabled, var.spacelift_stack_dependency_enabled) + stack_destructor_enabled = try(local.root_admin_stack_config.settings.spacelift.stack_destructor_enabled, var.stack_destructor_enabled) + stack_name = 
var.stack_name != null ? var.stack_name : local.root_admin_stack_name + terraform_smart_sanitization = try(local.root_admin_stack_config.settings.spacelift.terraform_smart_sanitization, var.terraform_smart_sanitization) + terraform_version = lookup(var.terraform_version_map, try(local.root_admin_stack_config.settings.spacelift.terraform_version, ""), var.terraform_version) + terraform_workflow_tool = try(local.root_admin_stack_config.settings.spacelift.terraform_workflow_tool, var.terraform_workflow_tool) + webhook_enabled = try(local.root_admin_stack_config.settings.spacelift.webhook_enabled, var.webhook_enabled) + webhook_endpoint = try(local.root_admin_stack_config.settings.spacelift.webhook_endpoint, var.webhook_endpoint) + webhook_secret = try(local.root_admin_stack_config.settings.spacelift.webhook_secret, var.webhook_secret) + worker_pool_id = try(local.worker_pools[var.worker_pool_name], null) + + azure_devops = try(local.root_admin_stack_config.settings.spacelift.azure_devops, var.azure_devops) + bitbucket_cloud = try(local.root_admin_stack_config.settings.spacelift.bitbucket_cloud, var.bitbucket_cloud) + bitbucket_datacenter = try(local.root_admin_stack_config.settings.spacelift.bitbucket_datacenter, var.bitbucket_datacenter) + cloudformation = try(local.root_admin_stack_config.settings.spacelift.cloudformation, var.cloudformation) + github_enterprise = try(local.root_admin_stack_config.settings.spacelift.github_enterprise, var.github_enterprise) + gitlab = try(local.root_admin_stack_config.settings.spacelift.gitlab, var.gitlab) + pulumi = try(local.root_admin_stack_config.settings.spacelift.pulumi, var.pulumi) + showcase = try(local.root_admin_stack_config.settings.spacelift.showcase, var.showcase) + + depends_on = [ + null_resource.spaces_precondition, + null_resource.workers_precondition + ] + + context = module.this.context +} + +resource "spacelift_policy_attachment" "root" { + for_each = var.root_stack_policy_attachments + policy_id = local.policies[each.key] + stack_id = module.root_admin_stack.id +} diff --git a/modules/spacelift/admin-stack/spaces.tf b/modules/spacelift/admin-stack/spaces.tf new file mode 100644 index 000000000..b66a208e6 --- /dev/null +++ b/modules/spacelift/admin-stack/spaces.tf @@ -0,0 +1,53 @@ +locals { + # This loops through all of the administrative stacks in the atmos config and extracts the space_name from the + # spacelift.settings metadata. It then creates a set of all of the unique space_names so we can use that to look up + # their IDs from remote state. + unique_spaces_from_config = toset([for k, v in { + for k, v in module.child_stacks_config.spacelift_stacks : k => try( + coalesce( + # if `space_name` is specified, use it + v.settings.spacelift.space_name, + # otherwise, try to replace the context tokens in `space_name_template` and use it + # `space_name_template` accepts the following context tokens: {namespace}, {tenant}, {environment}, {stage} + v.settings.spacelift.space_name_pattern != "" && v.settings.spacelift.space_name_pattern != null ? 
( + replace( + replace( + replace( + replace( + v.settings.spacelift.space_name_pattern, + "{namespace}", module.this.namespace + ), + "{tenant}", module.this.tenant + ), + "{environment}", module.this.environment + ), + "{stage}", module.this.stage) + ) : "" + ), + "root" + ) + if try(v.settings.spacelift.workspace_enabled, false) == true + } : v if v != "root"]) + + # Create a map of all the unique spaces {space_name = space_id} + spaces = merge(try({ + for k in local.unique_spaces_from_config : k => module.spaces.outputs.spaces[k].id + }, {}), { + root = "root" + }) + + # Create a list of all the spaces that are defined in config but missing from Spacelift + missing_spaces = setunion(setsubtract(local.unique_spaces_from_config, keys(local.spaces))) +} + +# Ensure all of the spaces referenced in the Atmos config exist in Spacelift +resource "null_resource" "spaces_precondition" { + count = local.enabled ? 1 : 0 + + lifecycle { + precondition { + condition = length(local.missing_spaces) == 0 + error_message = "Please create the following spaces in Spacelift before running this module: ${join(", ", local.missing_spaces)}" + } + } +} diff --git a/modules/spacelift/admin-stack/variables.tf b/modules/spacelift/admin-stack/variables.tf new file mode 100644 index 000000000..883383c66 --- /dev/null +++ b/modules/spacelift/admin-stack/variables.tf @@ -0,0 +1,351 @@ +variable "admin_stack_label" { + description = "Label to use to identify the admin stack when creating the child stacks" + type = string + default = "admin-stack-name" +} + +variable "allow_public_workers" { + type = bool + description = "Whether to allow public workers to be used for this stack" + default = false +} + +variable "autodeploy" { + type = bool + description = "Controls the Spacelift 'autodeploy' option for a stack" + default = false +} + +variable "autoretry" { + type = bool + description = "Controls the Spacelift 'autoretry' option for a stack" + default = false +} + +variable "aws_role_arn" { + type = string + description = "ARN of the AWS IAM role to assume and put its temporary credentials in the runtime environment" + default = null +} + +variable "aws_role_enabled" { + type = bool + description = "Flag to enable/disable Spacelift to use AWS STS to assume the supplied IAM role and put its temporary credentials in the runtime environment" + default = false +} + +variable "aws_role_external_id" { + type = string + description = "Custom external ID (works only for private workers). 
See https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html for more details" + default = null +} + +variable "aws_role_generate_credentials_in_worker" { + type = bool + description = "Flag to enable/disable generating AWS credentials in the private worker after assuming the supplied IAM role" + default = true +} + +variable "azure_devops" { + type = map(any) + description = "Azure DevOps VCS settings" + default = null +} + +variable "bitbucket_cloud" { + type = map(any) + description = "Bitbucket Cloud VCS settings" + default = null +} + +variable "bitbucket_datacenter" { + type = map(any) + description = "Bitbucket Datacenter VCS settings" + default = null +} + +variable "branch" { + type = string + description = "Specify which branch to use within your infrastructure repo" + default = "main" +} + +variable "child_policy_attachments" { + description = "List of policy attachments to attach to the child stacks created by this module" + type = set(string) + default = [] +} + +variable "cloudformation" { + type = map(any) + description = "CloudFormation-specific configuration. Presence means this Stack is a CloudFormation Stack." + default = null +} + +variable "commit_sha" { + type = string + description = "The commit SHA for which to trigger a run. Requires `var.spacelift_run_enabled` to be set to `true`" + default = null +} + +variable "component_env" { + type = any + default = {} + description = "Map of component ENV variables" +} + +variable "component_root" { + type = string + description = "The path, relative to the root of the repository, where the component can be found" +} + +variable "component_vars" { + type = any + default = {} + description = "All Terraform values to be applied to the stack via a mounted file" +} + +variable "context_attachments" { + type = list(string) + description = "A list of context IDs to attach to this stack" + default = [] +} + +variable "context_filters" { + description = "Context filters to select atmos stacks matching specific criteria to create as children." + type = object({ + namespaces = optional(list(string), []) + environments = optional(list(string), []) + tenants = optional(list(string), []) + stages = optional(list(string), []) + tags = optional(map(string), {}) + administrative = optional(bool) + root_administrative = optional(bool) + }) +} + +variable "excluded_context_filters" { + description = "Context filters to exclude from stacks matching specific criteria of `var.context_filters`." + default = {} + type = object({ + namespaces = optional(list(string), []) + environments = optional(list(string), []) + tenants = optional(list(string), []) + stages = optional(list(string), []) + tags = optional(map(string), {}) + }) +} + +variable "description" { + type = string + description = "Specify description of stack" + default = null +} + +variable "drift_detection_enabled" { + type = bool + description = "Flag to enable/disable drift detection on the infrastructure stacks" + default = false +} + +variable "drift_detection_reconcile" { + type = bool + description = "Flag to enable/disable infrastructure stacks drift automatic reconciliation. 
If drift is detected and `reconcile` is turned on, Spacelift will create a tracked run to correct the drift" + default = false +} + +variable "drift_detection_schedule" { + type = list(string) + description = "List of cron expressions to schedule drift detection for the infrastructure stacks" + default = ["0 4 * * *"] +} + +variable "drift_detection_timezone" { + type = string + description = "Timezone in which the schedule is expressed. Defaults to UTC." + default = null +} + +variable "github_enterprise" { + type = map(any) + description = "GitHub Enterprise (self-hosted) VCS settings" + default = null +} + +variable "gitlab" { + type = map(any) + description = "GitLab VCS settings" + default = null +} + +variable "labels" { + type = list(string) + description = "A list of labels for the stack" + default = [] +} + +variable "local_preview_enabled" { + type = bool + description = "Indicates whether local preview runs can be triggered on this Stack" + default = false +} + +variable "manage_state" { + type = bool + description = "Flag to enable/disable manage_state setting in stack" + default = false +} + +variable "protect_from_deletion" { + type = bool + description = "Flag to enable/disable deletion protection." + default = false +} + +variable "pulumi" { + type = map(any) + description = "Pulumi-specific configuration. Presence means this Stack is a Pulumi Stack." + default = null +} + +variable "repository" { + type = string + description = "The name of your infrastructure repo" +} + +variable "root_admin_stack" { + description = "Flag to indicate if this stack is the root admin stack. In this case, the stack will be created in the root space and will create all the other admin stacks as children." + type = bool + default = false +} + +variable "root_stack_policy_attachments" { + description = "List of policy attachments to attach to the root admin stack" + type = set(string) + default = [] +} + +variable "runner_image" { + type = string + description = "The full image name and tag of the Docker image to use in Spacelift" + default = null +} + +variable "showcase" { + type = map(any) + description = "Showcase settings" + default = null +} + +variable "space_id" { + type = string + description = "Place the stack in the specified space_id" + default = "root" +} + +variable "spacelift_run_enabled" { + type = bool + description = "Enable/disable creation of the `spacelift_run` resource" + default = false +} + +variable "spacelift_spaces_environment_name" { + type = string + description = "The environment name of the spacelift spaces component" + default = null +} + +variable "spacelift_spaces_stage_name" { + type = string + description = "The stage name of the spacelift spaces component" + default = null +} + +variable "spacelift_spaces_tenant_name" { + type = string + description = "The tenant name of the spacelift spaces component" + default = null +} + +variable "spacelift_spaces_component_name" { + type = string + description = "The component name of the spacelift spaces component" + default = "spacelift/spaces" +} + +variable "spacelift_stack_dependency_enabled" { + type = bool + description = "If enabled, the `spacelift_stack_dependency` Spacelift resource will be used to create dependencies between stacks instead of using the `depends-on` labels. 
The `depends-on` labels will be removed from the stacks and the trigger policies for dependencies will be detached" + default = false +} + +variable "stack_destructor_enabled" { + type = bool + description = "Flag to enable/disable the stack destructor to destroy the resources of the stack before deleting the stack itself" + default = false +} + +variable "stack_name" { + type = string + description = "The name of the Spacelift stack" + default = null +} + +variable "terraform_smart_sanitization" { + type = bool + description = "Whether or not to enable [Smart Sanitization](https://docs.spacelift.io/vendors/terraform/resource-sanitization) which will only sanitize values marked as sensitive." + default = false +} + +variable "terraform_version" { + type = string + description = "Specify the version of Terraform to use for the stack" + default = null +} + +variable "terraform_version_map" { + type = map(string) + description = "A map to determine which Terraform patch version to use for each minor version" + default = {} +} + +variable "terraform_workflow_tool" { + type = string + description = "Defines the tool that will be used to execute the workflow. This can be one of OPEN_TOFU, TERRAFORM_FOSS or CUSTOM. Defaults to TERRAFORM_FOSS." + default = "TERRAFORM_FOSS" + + validation { + condition = contains(["OPEN_TOFU", "TERRAFORM_FOSS", "CUSTOM"], var.terraform_workflow_tool) + error_message = "Valid values for terraform_workflow_tool are (OPEN_TOFU, TERRAFORM_FOSS, CUSTOM)." + } +} + +variable "terraform_workspace" { + type = string + description = "Specify the Terraform workspace to use for the stack" + default = null +} + +variable "webhook_enabled" { + type = bool + description = "Flag to enable/disable the webhook endpoint to which Spacelift sends the POST requests about run state changes" + default = false +} + +variable "webhook_endpoint" { + type = string + description = "Webhook endpoint to which Spacelift sends the POST requests about run state changes" + default = null +} + +variable "webhook_secret" { + type = string + description = "Webhook secret used to sign each POST request so you're able to verify that the requests come from Spacelift" + default = null +} + +variable "worker_pool_name" { + type = string + description = "The atmos stack name of the worker pool. Example: `acme-core-ue2-auto-spacelift-default-worker-pool`" + default = null +} diff --git a/modules/spacelift/admin-stack/versions.tf b/modules/spacelift/admin-stack/versions.tf new file mode 100644 index 000000000..1bcb05a75 --- /dev/null +++ b/modules/spacelift/admin-stack/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.3" + + required_providers { + spacelift = { + source = "spacelift-io/spacelift" + version = ">= 0.1.31" + } + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + null = { + source = "hashicorp/null" + version = ">= 3.0" + } + utils = { + source = "cloudposse/utils" + version = ">= 1.14.0" + } + } +} diff --git a/modules/spacelift/admin-stack/workers.tf b/modules/spacelift/admin-stack/workers.tf new file mode 100644 index 000000000..2a512152c --- /dev/null +++ b/modules/spacelift/admin-stack/workers.tf @@ -0,0 +1,49 @@ +locals { + # This loops through all of the stacks in the atmos config and extracts the worker_name. It then creates a set of all + # of the unique worker_names so we can use that to make sure that the worker pool exists in Spacelift. 
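+  #
+  # For example (hypothetical names): if two components set `settings.spacelift.worker_pool_name` to
+  # "acme-core-ue2-auto-spacelift-default-worker-pool" and a third leaves it unset (with `var.worker_pool_name`
+  # also unset), the resulting set would be ["acme-core-ue2-auto-spacelift-default-worker-pool", "public"].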
+  #
+  # If a worker pool is not defined in the atmos config, then it will default to a fake "public" value so we can
+  # check below if any stacks are configured to use public workers.
+  unique_workers_from_config = toset([for k, v in {
+    for k, v in module.child_stacks_config.spacelift_stacks : k => coalesce(try(v.settings.spacelift.worker_pool_name, var.worker_pool_name), "public")
+    if try(v.settings.spacelift.workspace_enabled, false) == true
+  } : v])
+
+  # Create a map of all the worker pools that exist in spacelift {worker_pool_name = worker_pool_id}
+  worker_pools = { for k, v in data.spacelift_worker_pools.this.worker_pools : v.name => v.worker_pool_id }
+
+  # Create a list of all the worker pools that are defined in config but missing from Spacelift
+  missing_workers = setunion(setsubtract(local.unique_workers_from_config, keys(local.worker_pools)))
+}
+
+data "spacelift_worker_pools" "this" {
+}
+
+# Ensure no stacks are configured to use public workers if they are not allowed
+resource "null_resource" "public_workers_precondition" {
+  count = local.enabled ? 1 : 0
+  lifecycle {
+    precondition {
+      condition     = var.allow_public_workers == true || contains(local.missing_workers, "public") == false
+      error_message = "Use of public workers is not allowed. Please create worker pool(s) in Spacelift and assign all stacks to a worker before running this module."
+    }
+  }
+}
+
+# Ensure all of the worker pools referenced in the atmos config exist in Spacelift
+resource "null_resource" "workers_precondition" {
+  count = local.enabled ? 1 : 0
+
+  depends_on = [null_resource.public_workers_precondition]
+
+  lifecycle {
+    precondition {
+      condition = (var.allow_public_workers == false && length(local.missing_workers) == 0) || (
+        var.allow_public_workers == true &&
+        length(local.missing_workers) == 1
+        && contains(local.missing_workers, "public")
+      )
+      error_message = "Please create the following workers in Spacelift before running this module: ${join(", ", local.missing_workers)}"
+    }
+  }
+}
diff --git a/modules/spacelift/bin/spacelift-configure b/modules/spacelift/bin/spacelift-configure
deleted file mode 100755
index 43a3bade2..000000000
--- a/modules/spacelift/bin/spacelift-configure
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-# Link the default terraform binary to Spacelift's Terraform installation path of `/bin/terraform`.
-# Because the Terraform commands are executed as just `terraform` by `atmos` (unless otherwise specified)
-# and also in scripts, and the default PATH has `/usr/bin` before `/bin`,
-# plain 'terraform' would otherwise resolve to the Docker container's
-# chosen version of Terraform, not Spacelift's configured version.
-
-ln -sfTv /bin/terraform /usr/bin/terraform
-echo "Using Terraform: "
-which terraform
-terraform version
-
-# Remove -x for security and cleaner output
-set +x
-
-# Log the AWS authentication settings
-identity=$(unset AWS_PROFILE && aws sts get-caller-identity --query Arn --output text)
-
-printf "\nIAM Role without profile is %s\n\n" "$identity"
-
-# If you want to have dynamic AWS config file or profile selection, do it here.
-# For example: -# if (printf "%s" "$identity" | grep -q -- -prod-); then -# printf "Detected production\n\n" -# ln -sfTv /etc/aws-config/aws-config-spacelift-production /etc/aws-config/aws-config-spacelift -# else -# printf "Configuring for non-production environment\n\n" -# ln -sfTv /etc/aws-config/aws-config-spacelift-non-production /etc/aws-config/aws-config-spacelift -# fi - -printf "\nAWS_CONFIG_FILE set to %s\n" "$AWS_CONFIG_FILE" -printf "AWS_PROFILE set to %s\n\n" "$AWS_PROFILE" - -echo "+ crudini --get --format=ini $AWS_CONFIG_FILE \"profile $AWS_PROFILE\"" -crudini --get --format=ini $AWS_CONFIG_FILE "profile $AWS_PROFILE" - -effective_arn="$(aws sts get-caller-identity --query Arn --output text)" - -printf "\nEffective AWS Role Arn is %s\n\n\n" "$effective_arn" diff --git a/modules/spacelift/bin/spacelift-git-use-https b/modules/spacelift/bin/spacelift-git-use-https deleted file mode 100755 index 809ee358b..000000000 --- a/modules/spacelift/bin/spacelift-git-use-https +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -set -ex - -# Spacelift can use a PAT via a .netrc file, in which case any -# git@github.com: urls need to be converted to HTTPS urls or Spacelift will fail. -# This allows us to use SSH paths throughout the codebase so local plans work -# while maintaining compatibility with Spacelift. - -# The URL "git@github.com:" is used by `git` (e.g. `git clone`) -git config --global url."https://github.com/".insteadOf "git@github.com:" -# The URL "ssh://git@github.com/" is used by Terraform (e.g. `terraform init --from-module=...`) -# NOTE: we use `--add` to append the second URL to the config file -git config --global url."https://github.com/".insteadOf "ssh://git@github.com/" --add diff --git a/modules/spacelift/bin/spacelift-tf-workspace b/modules/spacelift/bin/spacelift-tf-workspace deleted file mode 100755 index 70fef3f64..000000000 --- a/modules/spacelift/bin/spacelift-tf-workspace +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -# This goes before set -ex because we do not need to echo the commands -# and we do not really care if they fail, they are just informational. -backend_profile=$(jq -r '.terraform.backend.s3.profile' backend.tf.json) - -if [[ -n $backend_profile ]] && [[ $backend_profile != null ]]; then - printf "\nBackend configured to use profile %s\n" "$backend_profile" - printf "Which maps to Role ARN %s\n\n" $(crudini --get --format=sh $AWS_CONFIG_FILE "profile $backend_profile" role_arn | cut -f2 -d=) -fi - -# Add -x for troubleshooting -set -ex -o pipefail - -terraform init -reconfigure - -printf "\n\nSelecting Terraform workspace...\n" - -# We have explicitly set up `backend.tf.json` in the same step where we set up the varfile, so to avoid surprises, do not regenerate it now -atmos terraform workspace "$ATMOS_COMPONENT" --stack="$ATMOS_STACK" --auto-generate-backend-file=false || { - printf "%s\n" "$?" 
- set +x - printf "\n\nUnable to select workspace\n" - echo "+ crudini --get --format=ini $AWS_CONFIG_FILE \"profile $AWS_PROFILE\"" - crudini --get --format=ini $AWS_CONFIG_FILE "profile $AWS_PROFILE" - printf "\n\n" - false -} - -# Remove -x for security -set +x diff --git a/modules/spacelift/bin/spacelift-write-vars b/modules/spacelift/bin/spacelift-write-vars deleted file mode 100755 index 775983044..000000000 --- a/modules/spacelift/bin/spacelift-write-vars +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -# Add -x for troubleshooting -set -ex -o pipefail - -function main() { - if [[ -z $ATMOS_STACK ]] || [[ -z $ATMOS_COMPONENT ]]; then - echo "Missing required environment variable" >&2 - echo " ATMOS_STACK=$ATMOS_STACK" >&2 - echo " ATMOS_COMPONENT=$ATMOS_COMPONENT" >&2 - return 3 - fi - - echo "Writing Stack variables to spacelift.auto.tfvars.json for Spacelift..." - - atmos terraform generate varfile "$ATMOS_COMPONENT" --stack="$ATMOS_STACK" -f spacelift.auto.tfvars.json >/dev/null - jq . + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3 | +| [aws](#requirement\_aws) | >= 4.0 | +| [spacelift](#requirement\_spacelift) | >= 0.1.31 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [policy](#module\_policy) | cloudposse/cloud-infrastructure-automation/spacelift//modules/spacelift-policy | 1.7.0 | +| [space](#module\_space) | cloudposse/cloud-infrastructure-automation/spacelift//modules/spacelift-space | 1.6.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to the `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [spaces](#input\_spaces) | A map of all Spaces to create in Spacelift |
map(object({
parent_space_id = string,
description = optional(string),
inherit_entities = optional(bool, false),
labels = optional(set(string), []),
policies = optional(map(object({
body = optional(string),
body_url = optional(string),
body_url_version = optional(string, "master"),
body_file_path = optional(string),
type = optional(string),
labels = optional(set(string), []),
})), {}),
}))
| n/a | yes | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [policies](#output\_policies) | The policies created by this component | +| [spaces](#output\_spaces) | The spaces created by this component | + + diff --git a/modules/spacelift/spaces/context.tf b/modules/spacelift/spaces/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/spacelift/spaces/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. +# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. 
+ Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. 
+ This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/spacelift/spaces/main.tf b/modules/spacelift/spaces/main.tf new file mode 100644 index 000000000..bcaa6ffe6 --- /dev/null +++ b/modules/spacelift/spaces/main.tf @@ -0,0 +1,69 @@ +locals { + enabled = module.this.enabled + + spaces = local.enabled ? { for item in values(module.space)[*].space : item.name => { + description = item.description + id = item.id + inherit_entities = item.inherit_entities + labels = toset(item.labels) + parent_space_id = item.parent_space_id + } + } : {} + + # Create a map of all the policies {policy_name = policy} + policies = local.enabled ? { for item in distinct(values(module.policy)[*].policy) : item.name => { + id = item.id + type = item.type + labels = toset(item.labels) + space_id = item.space_id + } + } : {} + + policy_inputs = local.enabled ? { + for k, v in var.spaces : k => { + for pn, p in v.policies : pn => { + body = p.body + body_url = p.body_url + body_url_version = p.body_url_version + body_file_path = p.body_file_path + labels = setunion(toset(v.labels), toset(p.labels)) + name = pn + space_id = k == "root" ? "root" : module.space[k].space_id + type = p.type + } + } + } : {} + + all_policies_inputs = merge([for k, v in local.policy_inputs : v if length(keys(v)) > 0]...) +} + +module "space" { + source = "cloudposse/cloud-infrastructure-automation/spacelift//modules/spacelift-space" + version = "1.6.0" + + # Create a space for each entry in the `spaces` variable, except for the root space which already exists by default + # and cannot be deleted. + for_each = { for k, v in var.spaces : k => v if k != "root" } + + space_name = each.key + parent_space_id = each.value.parent_space_id + description = each.value.description + inherit_entities_from_parent = each.value.inherit_entities + labels = each.value.labels +} + +module "policy" { + source = "cloudposse/cloud-infrastructure-automation/spacelift//modules/spacelift-policy" + version = "1.7.0" + + for_each = local.all_policies_inputs + + policy_name = each.key + body = each.value.body + body_url = each.value.body_url + body_url_version = each.value.body_url_version + body_file_path = each.value.body_file_path + type = each.value.type + labels = each.value.labels + space_id = each.value.space_id +} diff --git a/modules/spacelift/spaces/outputs.tf b/modules/spacelift/spaces/outputs.tf new file mode 100644 index 000000000..eca2e02c7 --- /dev/null +++ b/modules/spacelift/spaces/outputs.tf @@ -0,0 +1,9 @@ +output "spaces" { + description = "The spaces created by this component" + value = local.enabled ? local.spaces : {} +} + +output "policies" { + description = "The policies created by this component" + value = local.enabled ? 
local.policies : {} +} diff --git a/modules/spacelift/spaces/providers.tf b/modules/spacelift/spaces/providers.tf new file mode 100644 index 000000000..c95d53819 --- /dev/null +++ b/modules/spacelift/spaces/providers.tf @@ -0,0 +1 @@ +provider "spacelift" {} diff --git a/modules/spacelift/spaces/variables.tf b/modules/spacelift/spaces/variables.tf new file mode 100644 index 000000000..f11aed45c --- /dev/null +++ b/modules/spacelift/spaces/variables.tf @@ -0,0 +1,17 @@ +variable "spaces" { + type = map(object({ + parent_space_id = string, + description = optional(string), + inherit_entities = optional(bool, false), + labels = optional(set(string), []), + policies = optional(map(object({ + body = optional(string), + body_url = optional(string), + body_url_version = optional(string, "master"), + body_file_path = optional(string), + type = optional(string), + labels = optional(set(string), []), + })), {}), + })) + description = "A map of all Spaces to create in Spacelift" +} diff --git a/modules/spacelift/spaces/versions.tf b/modules/spacelift/spaces/versions.tf new file mode 100644 index 000000000..1174cd191 --- /dev/null +++ b/modules/spacelift/spaces/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.3" + + required_providers { + spacelift = { + source = "spacelift-io/spacelift" + version = ">= 0.1.31" + } + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + } +} diff --git a/modules/spacelift/versions.tf b/modules/spacelift/versions.tf deleted file mode 100644 index d096a3b97..000000000 --- a/modules/spacelift/versions.tf +++ /dev/null @@ -1,15 +0,0 @@ -terraform { - required_version = ">= 1.0.0" - - required_providers { - spacelift = { - source = "spacelift-io/spacelift" - version = ">= 0.1.29" - } - utils = { - source = "cloudposse/utils" - # problem with 1.4.0 - version = ">= 1.3.0, != 1.4.0" - } - } -} diff --git a/modules/spacelift/worker-pool/README.md b/modules/spacelift/worker-pool/README.md new file mode 100644 index 000000000..073e41011 --- /dev/null +++ b/modules/spacelift/worker-pool/README.md @@ -0,0 +1,282 @@ +--- +tags: + - component/spacelift/worker-pool + - layer/spacelift + - provider/aws + - provider/spacelift +--- + +# Component: `spacelift/worker-pool` + +This component is responsible for provisioning Spacelift worker pools. + +By default, workers are given pull access to the configured ECR, permission to assume the `spacelift` team role in the +identity account (although you must also configure the `spacelift` team in the identity account to allow the workers to +assume the role via `trusted_role_arns`), and have the following AWS managed IAM policies attached: + +- AmazonSSMManagedInstanceCore +- AutoScalingReadOnlyAccess +- AWSXRayDaemonWriteAccess +- CloudWatchAgentServerPolicy + +Among other things, this allows workers with SSM agent installed to be accessed via SSM Session Manager. + +```bash +aws ssm start-session --target +``` + +## Usage + +**Stack Level**: Regional + +Here's an example snippet for how to use this component. 
+ +```yaml +# stacks/catalog/spacelift/worker-pool.yaml +components: + terraform: + spacelift/worker-pool: + settings: + spacelift: + administrative: true + space_name: root + vars: + enabled: true + spacelift_api_endpoint: https://.app.spacelift.io + spacelift_spaces_tenant_name: "acme" + spacelift_spaces_environment_name: "gbl" + spacelift_spaces_stage_name: "root" + account_map_tenant_name: core + ecr_environment_name: ue1 + ecr_repo_name: infrastructure + ecr_stage_name: artifacts + ecr_tenant_name: core + # Set a low scaling threshold to ensure new workers are launched as soon as the current one(s) are busy + cpu_utilization_high_threshold_percent: 10 + cpu_utilization_low_threshold_percent: 5 + default_cooldown: 300 + desired_capacity: null + health_check_grace_period: 300 + health_check_type: EC2 + infracost_enabled: true + instance_type: t3.small + max_size: 3 + min_size: 1 + name: spacelift-worker-pool + scale_down_cooldown_seconds: 2700 + spacelift_agents_per_node: 1 + wait_for_capacity_timeout: 5m + block_device_mappings: + - device_name: "/dev/xvda" + no_device: null + virtual_name: null + ebs: + delete_on_termination: null + encrypted: false + iops: null + kms_key_id: null + snapshot_id: null + volume_size: 100 + volume_type: "gp2" +``` + +### Impacts on billing + +While scaling the workload for Spacelift, keep in mind that each agent connection counts against your quota of +self-hosted workers. The number of EC2 instances you have running is _not_ going to affect your Spacelift bill. As an +example, if you had 3 EC2 instances in your Spacelift worker pool, and you configured `spacelift_agents_per_node` to be +`3`, you would see your Spacelift bill report 9 agents being run. Take care while configuring the worker pool for your +Spacelift infrastructure. + +## Configuration + +### Docker Image on ECR + +Build and tag a Docker image for this repository and push to ECR. Ensure the account where this component is deployed +has read-only access to the ECR repository. + +### API Key + +Prior to deployment, the API key must exist in SSM. The key must have admin permissions. + +To generate the key, please follow +[these instructions](https://docs.spacelift.io/integrations/api.html#spacelift-api-key-token). Once generated, write the +API key ID and secret to the SSM key store at the following locations within the same AWS account and region where the +Spacelift worker pool will reside. + +| Key | SSM Path | Type | +| ------- | ----------------------- | -------------- | +| API ID | `/spacelift/key_id` | `SecureString` | +| API Key | `/spacelift/key_secret` | `SecureString` | + +_HINT_: The API key ID is displayed as an upper-case, 16-character alphanumeric value next to the key name in the API +key list. + +Save the keys using `chamber` using the correct profile for where Spacelift worker pool is provisioned + +``` +AWS_PROFILE=acme-gbl-auto-admin chamber write spacelift key_id 1234567890123456 +AWS_PROFILE=acme-gbl-auto-admin chamber write spacelift key_secret abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz +``` + +### IAM configuration + +After provisioning the component, you must give the created instance role permission to assume the Spacelift worker +role. This is done by adding `iam_role_arn` from the output to the `trusted_role_arns` list for the `spacelift` role in +`aws-teams`. 
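
As a rough sketch only (the file path, the `teams_config` nesting, and the role ARN below are illustrative placeholders; adapt them to however your `aws-teams` component is actually configured), the change might look like this:

```yaml
# stacks/catalog/aws-teams.yaml (hypothetical location)
components:
  terraform:
    aws-teams:
      vars:
        teams_config:
          spacelift:
            # Hypothetical example: allow the worker pool's instance role
            # (the `iam_role_arn` output of this component) to assume the
            # `spacelift` team role.
            trusted_role_arns:
              - "arn:aws:iam::111111111111:role/acme-core-ue1-auto-spacelift-worker-pool"
```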
+ + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | +| [cloudinit](#requirement\_cloudinit) | >= 2.2.0 | +| [spacelift](#requirement\_spacelift) | >= 0.1.2 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.9.0 | +| [cloudinit](#provider\_cloudinit) | >= 2.2.0 | +| [spacelift](#provider\_spacelift) | >= 0.1.2 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [autoscale\_group](#module\_autoscale\_group) | cloudposse/ec2-autoscale-group/aws | 0.35.1 | +| [ecr](#module\_ecr) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [iam\_label](#module\_iam\_label) | cloudposse/label/null | 0.25.0 | +| [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | +| [security\_group](#module\_security\_group) | cloudposse/security-group/aws | 2.2.0 | +| [spaces](#module\_spaces) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_iam_instance_profile.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource | +| [aws_iam_policy.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_role.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [spacelift_worker_pool.primary](https://registry.terraform.io/providers/spacelift-io/spacelift/latest/docs/resources/worker_pool) | resource | +| [aws_ami.spacelift](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | +| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | +| [aws_ssm_parameter.spacelift_key_id](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_ssm_parameter.spacelift_key_secret](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [cloudinit_config.config](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [account\_map\_environment\_name](#input\_account\_map\_environment\_name) | The name of the environment where `account_map` is provisioned | `string` | `"gbl"` | no | +| [account\_map\_stage\_name](#input\_account\_map\_stage\_name) | The name of the stage where `account_map` is provisioned | `string` | `"root"` | no | +| [account\_map\_tenant\_name](#input\_account\_map\_tenant\_name) | The name of the tenant where `account_map` is provisioned.

If the `tenant` label is not used, leave this as `null`. | `string` | `null` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [architecture](#input\_architecture) | OS architecture of the EC2 instance AMI | `list(string)` |
[
"x86_64"
]
| no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [aws\_config\_file](#input\_aws\_config\_file) | The AWS\_CONFIG\_FILE used by the worker. Can be overridden by `/.spacelift/config.yml`. | `string` | `"/etc/aws-config/aws-config-spacelift"` | no | +| [aws\_profile](#input\_aws\_profile) | The AWS\_PROFILE used by the worker. If not specified, `"${var.namespace}-identity"` will be used.
Can be overridden by `/.spacelift/config.yml`. | `string` | `null` | no | +| [block\_device\_mappings](#input\_block\_device\_mappings) | Specify volumes to attach to the instance besides the volumes specified by the AMI |
list(object({
device_name = string
no_device = bool
virtual_name = string
ebs = object({
delete_on_termination = bool
encrypted = bool
iops = number
kms_key_id = string
snapshot_id = string
volume_size = number
volume_type = string
})
}))
| `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [cpu\_utilization\_high\_threshold\_percent](#input\_cpu\_utilization\_high\_threshold\_percent) | CPU utilization high threshold | `number` | n/a | yes | +| [cpu\_utilization\_low\_threshold\_percent](#input\_cpu\_utilization\_low\_threshold\_percent) | CPU utilization low threshold | `number` | n/a | yes | +| [custom\_spacelift\_ami](#input\_custom\_spacelift\_ami) | Custom spacelift AMI | `bool` | `false` | no | +| [default\_cooldown](#input\_default\_cooldown) | The amount of time, in seconds, after a scaling activity completes before another scaling activity can start | `number` | `300` | no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to the `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [desired\_capacity](#input\_desired\_capacity) | The number of Amazon EC2 instances that should be running in the group, if not set will use `min_size` as value | `number` | `null` | no | +| [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance will be EBS-optimized | `bool` | `false` | no | +| [ecr\_environment\_name](#input\_ecr\_environment\_name) | The name of the environment where `ecr` is provisioned | `string` | `""` | no | +| [ecr\_region](#input\_ecr\_region) | AWS region that contains the ECR infrastructure repo | `string` | `""` | no | +| [ecr\_repo\_name](#input\_ecr\_repo\_name) | ECR repository name | `string` | n/a | yes | +| [ecr\_stage\_name](#input\_ecr\_stage\_name) | The name of the stage where `ecr` is provisioned | `string` | `"artifacts"` | no | +| [ecr\_tenant\_name](#input\_ecr\_tenant\_name) | The name of the tenant where `ecr` is provisioned.

If the `tenant` label is not used, leave this as `null`. | `string` | `null` | no | +| [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [github\_netrc\_enabled](#input\_github\_netrc\_enabled) | Whether to create a GitHub .netrc file so Spacelift can clone private GitHub repositories. | `bool` | `false` | no | +| [github\_netrc\_ssm\_path\_token](#input\_github\_netrc\_ssm\_path\_token) | If `github_netrc` is enabled, this is the SSM path to retrieve the GitHub token. | `string` | `"/github/token"` | no | +| [github\_netrc\_ssm\_path\_user](#input\_github\_netrc\_ssm\_path\_user) | If `github_netrc` is enabled, this is the SSM path to retrieve the GitHub user | `string` | `"/github/user"` | no | +| [health\_check\_grace\_period](#input\_health\_check\_grace\_period) | Time (in seconds) after instance comes into service before checking health | `number` | `300` | no | +| [health\_check\_type](#input\_health\_check\_type) | Controls how health checking is done. Valid values are `EC2` or `ELB` | `string` | `"EC2"` | no | +| [iam\_attributes](#input\_iam\_attributes) | Additional attributes to add to the IDs of the IAM role and policy | `list(string)` | `[]` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [infracost\_api\_token\_ssm\_path](#input\_infracost\_api\_token\_ssm\_path) | This is the SSM path to retrieve and set the INFRACOST\_API\_TOKEN environment variable | `string` | `"/infracost/token"` | no | +| [infracost\_cli\_args](#input\_infracost\_cli\_args) | These are the CLI args passed to infracost | `string` | `""` | no | +| [infracost\_enabled](#input\_infracost\_enabled) | Whether to enable infracost for Spacelift stacks | `bool` | `false` | no | +| [infracost\_warn\_on\_failure](#input\_infracost\_warn\_on\_failure) | A failure executing Infracost, or a non-zero exit code being returned from the command will cause runs to fail. If this is true, this will only warn instead of failing the stack. | `bool` | `true` | no | +| [instance\_lifetime](#input\_instance\_lifetime) | Number of seconds after which the instance will be terminated. The default is set to 14 days. | `number` | `1209600` | no | +| [instance\_refresh](#input\_instance\_refresh) | The instance refresh definition. If this block is configured, an Instance Refresh will be started when the Auto Scaling Group is updated |
object({
strategy = string
preferences = object({
instance_warmup = optional(number, null)
min_healthy_percentage = optional(number, null)
skip_matching = optional(bool, null)
auto_rollback = optional(bool, null)
})
triggers = optional(list(string), [])
})
| `null` | no | +| [instance\_type](#input\_instance\_type) | EC2 instance type to use for workers | `string` | `"r5n.large"` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | +| [launch\_template\_version](#input\_launch\_template\_version) | Launch template version to use for workers. Note that instance refresh settings are IGNORED unless template version is empty | `string` | `"$Latest"` | no | +| [max\_size](#input\_max\_size) | The maximum size of the autoscale group | `number` | n/a | yes | +| [min\_size](#input\_min\_size) | The minimum size of the autoscale group | `number` | n/a | yes | +| [mixed\_instances\_policy](#input\_mixed\_instances\_policy) | Policy to use a mixed group of on-demand/spot of different types. Launch template is automatically generated. https://www.terraform.io/docs/providers/aws/r/autoscaling_group.html#mixed_instances_policy-1 |
object({
instances_distribution = object({
on_demand_allocation_strategy = string
on_demand_base_capacity = number
on_demand_percentage_above_base_capacity = number
spot_allocation_strategy = string
spot_instance_pools = number
spot_max_price = string
})
override = list(object({
instance_type = string
weighted_capacity = number
}))
})
| `null` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [scale\_down\_cooldown\_seconds](#input\_scale\_down\_cooldown\_seconds) | The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start | `number` | `300` | no | +| [space\_name](#input\_space\_name) | The name of the Space to create the worker pool in | `string` | `"root"` | no | +| [spacelift\_agents\_per\_node](#input\_spacelift\_agents\_per\_node) | Number of Spacelift agents to run on one worker node. NOTE: This affects billable units. Spacelift charges per agent. | `number` | `1` | no | +| [spacelift\_ami\_id](#input\_spacelift\_ami\_id) | AMI ID of Spacelift worker pool image | `string` | `null` | no | +| [spacelift\_api\_endpoint](#input\_spacelift\_api\_endpoint) | The Spacelift API endpoint URL (e.g. https://example.app.spacelift.io) | `string` | n/a | yes | +| [spacelift\_aws\_account\_id](#input\_spacelift\_aws\_account\_id) | AWS Account ID owned by Spacelift | `string` | `"643313122712"` | no | +| [spacelift\_domain\_name](#input\_spacelift\_domain\_name) | Top-level domain name to use for pulling the launcher binary | `string` | `"spacelift.io"` | no | +| [spacelift\_runner\_image](#input\_spacelift\_runner\_image) | URL of ECR image to use for Spacelift | `string` | `""` | no | +| [spacelift\_spaces\_component\_name](#input\_spacelift\_spaces\_component\_name) | The name of the spacelift spaces component | `string` | `"spacelift/spaces"` | no | +| [spacelift\_spaces\_environment\_name](#input\_spacelift\_spaces\_environment\_name) | The environment name of the spacelift spaces component | `string` | `null` | no | +| [spacelift\_spaces\_stage\_name](#input\_spacelift\_spaces\_stage\_name) | The stage name of the spacelift spaces component | `string` | `null` | no | +| [spacelift\_spaces\_tenant\_name](#input\_spacelift\_spaces\_tenant\_name) | The tenant name of the spacelift spaces component | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | +| [termination\_policies](#input\_termination\_policies) | A list of policies to decide how the instances in the auto scale group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `Default` | `list(string)` |
[
"OldestLaunchConfiguration"
]
| no | +| [wait\_for\_capacity\_timeout](#input\_wait\_for\_capacity\_timeout) | A maximum duration that Terraform should wait for ASG instances to be healthy before timing out. (See also Waiting for Capacity below.) Setting this to '0' causes Terraform to skip all Capacity Waiting behavior | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [autoscaling\_group\_arn](#output\_autoscaling\_group\_arn) | The ARN for this AutoScaling Group | +| [autoscaling\_group\_default\_cooldown](#output\_autoscaling\_group\_default\_cooldown) | Time between a scaling activity and the succeeding scaling activity | +| [autoscaling\_group\_health\_check\_grace\_period](#output\_autoscaling\_group\_health\_check\_grace\_period) | Time after instance comes into service before checking health | +| [autoscaling\_group\_health\_check\_type](#output\_autoscaling\_group\_health\_check\_type) | `EC2` or `ELB`. Controls how health checking is done | +| [autoscaling\_group\_id](#output\_autoscaling\_group\_id) | The autoscaling group id | +| [autoscaling\_group\_max\_size](#output\_autoscaling\_group\_max\_size) | The maximum size of the autoscale group | +| [autoscaling\_group\_min\_size](#output\_autoscaling\_group\_min\_size) | The minimum size of the autoscale group | +| [autoscaling\_group\_name](#output\_autoscaling\_group\_name) | The autoscaling group name | +| [iam\_role\_arn](#output\_iam\_role\_arn) | Spacelift IAM Role ARN | +| [iam\_role\_id](#output\_iam\_role\_id) | Spacelift IAM Role ID | +| [iam\_role\_name](#output\_iam\_role\_name) | Spacelift IAM Role name | +| [launch\_template\_arn](#output\_launch\_template\_arn) | The ARN of the launch template | +| [launch\_template\_id](#output\_launch\_template\_id) | The ID of the launch template | +| [security\_group\_arn](#output\_security\_group\_arn) | Spacelift Security Group ARN | +| [security\_group\_id](#output\_security\_group\_id) | Spacelift Security Group ID | +| [security\_group\_name](#output\_security\_group\_name) | Spacelift Security Group Name | +| [worker\_pool\_id](#output\_worker\_pool\_id) | Spacelift worker pool ID | +| [worker\_pool\_name](#output\_worker\_pool\_name) | Spacelift worker pool name | + + + +## References + +- [cloudposse/terraform-spacelift-cloud-infrastructure-automation](https://github.com/cloudposse/terraform-spacelift-cloud-infrastructure-automation) - + Cloud Posse's related upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/spacelift-worker-pool) - + Cloud Posse's upstream component + +[](https://cpco.io/component) diff --git a/modules/spacelift/worker-pool/context.tf b/modules/spacelift/worker-pool/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/spacelift/worker-pool/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. 
+# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. 
A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? 
true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/spacelift/worker-pool/data.tf b/modules/spacelift/worker-pool/data.tf new file mode 100644 index 000000000..462c52953 --- /dev/null +++ b/modules/spacelift/worker-pool/data.tf @@ -0,0 +1,39 @@ +data "aws_partition" "current" { + count = local.enabled ? 1 : 0 +} + +# The Spacelift always validates its credentials, so we always pass api_key_id and api_key_secret +data "aws_ssm_parameter" "spacelift_key_id" { + name = "/spacelift/key_id" +} + +data "aws_ssm_parameter" "spacelift_key_secret" { + name = "/spacelift/key_secret" +} + +data "aws_ami" "spacelift" { + count = local.enabled && var.spacelift_ami_id == null ? 1 : 0 + + owners = var.custom_spacelift_ami ? 
["self"] : [var.spacelift_aws_account_id] + most_recent = true + + filter { + name = "name" + values = ["spacelift-*"] + } + + filter { + name = "root-device-type" + values = ["ebs"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = var.architecture + } +} diff --git a/modules/spacelift/worker-pool/iam.tf b/modules/spacelift/worker-pool/iam.tf new file mode 100644 index 000000000..6fc90960a --- /dev/null +++ b/modules/spacelift/worker-pool/iam.tf @@ -0,0 +1,90 @@ +module "iam_label" { + source = "cloudposse/label/null" + version = "0.25.0" + + attributes = var.iam_attributes + + context = module.this.context +} + +data "aws_iam_policy_document" "assume_role_policy" { + count = local.enabled ? 1 : 0 + + statement { + actions = [ + "sts:AssumeRole", + "sts:SetSourceIdentity", + "sts:TagSession", + ] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +locals { + identity_account_name = module.account_map.outputs.identity_account_account_name + role_arn_template = module.account_map.outputs.iam_role_arn_templates[local.identity_account_name] +} + +data "aws_iam_policy_document" "default" { + count = local.enabled ? 1 : 0 + + statement { + actions = [ + "sts:AssumeRole", + "sts:SetSourceIdentity", + "sts:TagSession", + ] + resources = formatlist(local.role_arn_template, ["spacelift"]) + } + + statement { + actions = ["ecr:GetAuthorizationToken"] + resources = ["*"] + } + + statement { + actions = [ + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage" + ] + resources = [local.ecr_repo_arn] + } +} + +resource "aws_iam_policy" "default" { + count = local.enabled ? 1 : 0 + + name = module.iam_label.id + policy = join("", data.aws_iam_policy_document.default[*].json) + + tags = module.iam_label.tags +} + +resource "aws_iam_role" "default" { + count = local.enabled ? 1 : 0 + + name = module.iam_label.id + assume_role_policy = join("", data.aws_iam_policy_document.assume_role_policy[*].json) + managed_policy_arns = [ + join("", aws_iam_policy.default[*].arn), + "arn:${join("", data.aws_partition.current[*].partition)}:iam::aws:policy/AutoScalingReadOnlyAccess", + "arn:${join("", data.aws_partition.current[*].partition)}:iam::aws:policy/CloudWatchAgentServerPolicy", + "arn:${join("", data.aws_partition.current[*].partition)}:iam::aws:policy/AmazonSSMManagedInstanceCore", + "arn:${join("", data.aws_partition.current[*].partition)}:iam::aws:policy/AWSXRayDaemonWriteAccess" + ] + + tags = module.iam_label.tags +} + +resource "aws_iam_instance_profile" "default" { + count = local.enabled ? 
1 : 0 + + name = module.iam_label.id + role = join("", aws_iam_role.default[*].name) + + tags = module.iam_label.tags +} diff --git a/modules/spacelift/worker-pool/main.tf b/modules/spacelift/worker-pool/main.tf new file mode 100644 index 000000000..7abbb3434 --- /dev/null +++ b/modules/spacelift/worker-pool/main.tf @@ -0,0 +1,127 @@ +locals { + enabled = module.this.enabled + vpc_id = module.vpc.outputs.vpc_id + vpc_private_subnet_ids = module.vpc.outputs.private_subnet_ids + ecr_repo_arn = module.ecr.outputs.ecr_repo_arn_map[var.ecr_repo_name] + ecr_repo_url = module.ecr.outputs.ecr_repo_url_map[var.ecr_repo_name] + ecr_account_id = element(split(".", local.ecr_repo_url), 0) + ecr_region = coalesce(var.ecr_region, var.region) + spacelift_runner_image = coalesce(var.spacelift_runner_image, local.ecr_repo_url) + userdata_template = "${path.module}/templates/user-data.sh" + spacelift_service_file = "${path.module}/templates/spacelift@.service" + + spacelift_service_config = <<-END + #cloud-config + ${jsonencode({ + write_files = flatten([ + { + path = "/etc/systemd/system/spacelift@.service" + permissions = "0655" + owner = "root:root" + content = file(local.spacelift_service_file) + } + ] + ) +})} +END + +space_id = lookup(module.spaces.outputs, var.space_name, "root") +} + +resource "spacelift_worker_pool" "primary" { + count = local.enabled ? 1 : 0 + + name = module.this.id + description = "Deployed to ${var.region} within '${join("-", compact([module.this.tenant, module.this.stage]))}' AWS account" + + space_id = local.space_id +} + +data "cloudinit_config" "config" { + count = local.enabled ? 1 : 0 + + gzip = false + base64_encode = true + + part { + content_type = "text/cloud-config" + filename = "spacelift@.service" + content = local.spacelift_service_config + } + + part { + content_type = "text/x-shellscript" + filename = "user-data.sh" + content = templatefile(local.userdata_template, { + region = var.region + aws_config_file = var.aws_config_file + aws_profile = coalesce(var.aws_profile, "${var.namespace}-identity") + ecr_region = local.ecr_region + ecr_account_id = local.ecr_account_id + spacelift_runner_image = local.spacelift_runner_image + spacelift_worker_pool_private_key = join("", spacelift_worker_pool.primary[*].private_key) + spacelift_worker_pool_config = join("", spacelift_worker_pool.primary[*].config) + spacelift_domain_name = var.spacelift_domain_name + github_netrc_enabled = var.github_netrc_enabled + github_netrc_ssm_path_token = var.github_netrc_ssm_path_token + github_netrc_ssm_path_user = var.github_netrc_ssm_path_user + spacelift_agents_per_node = var.spacelift_agents_per_node + infracost_enabled = var.infracost_enabled + infracost_api_token_ssm_path = var.infracost_api_token_ssm_path + infracost_warn_on_failure = var.infracost_warn_on_failure + infracost_cli_args = var.infracost_cli_args + }) + } +} + +module "security_group" { + source = "cloudposse/security-group/aws" + version = "2.2.0" + + security_group_description = "Security Group for Spacelift worker pool" + allow_all_egress = true + + vpc_id = local.vpc_id + + context = module.this.context +} + +module "autoscale_group" { + source = "cloudposse/ec2-autoscale-group/aws" + version = "0.35.1" + + image_id = var.spacelift_ami_id == null ? 
join("", data.aws_ami.spacelift[*].image_id) : var.spacelift_ami_id + instance_type = var.instance_type + mixed_instances_policy = var.mixed_instances_policy + subnet_ids = local.vpc_private_subnet_ids + health_check_type = var.health_check_type + health_check_grace_period = var.health_check_grace_period + user_data_base64 = join("", data.cloudinit_config.config[*].rendered) + associate_public_ip_address = false + block_device_mappings = var.block_device_mappings + iam_instance_profile_name = join("", aws_iam_instance_profile.default[*].name) + security_group_ids = [module.security_group.id] + termination_policies = var.termination_policies + wait_for_capacity_timeout = var.wait_for_capacity_timeout + ebs_optimized = var.ebs_optimized + + min_size = var.min_size + max_size = var.max_size + desired_capacity = var.desired_capacity + + # Auto-scaling policies and CloudWatch metric alarms + autoscaling_policies_enabled = true + default_cooldown = var.default_cooldown + scale_down_cooldown_seconds = var.scale_down_cooldown_seconds + cpu_utilization_high_threshold_percent = var.cpu_utilization_high_threshold_percent + cpu_utilization_low_threshold_percent = var.cpu_utilization_low_threshold_percent + + max_instance_lifetime = var.instance_lifetime + + # The instance refresh definition + # If this block is configured, an Instance Refresh will be started when the Auto Scaling Group is updated + instance_refresh = var.instance_refresh + launch_template_version = var.instance_refresh == null ? "$Latest" : "" + + context = module.this.context +} diff --git a/modules/spacelift/worker-pool/outputs.tf b/modules/spacelift/worker-pool/outputs.tf new file mode 100644 index 000000000..7db7df4f6 --- /dev/null +++ b/modules/spacelift/worker-pool/outputs.tf @@ -0,0 +1,89 @@ +output "worker_pool_id" { + value = join("", spacelift_worker_pool.primary[*].id) + description = "Spacelift worker pool ID" +} + +output "worker_pool_name" { + value = join("", spacelift_worker_pool.primary[*].name) + description = "Spacelift worker pool name" +} + +output "security_group_id" { + description = "Spacelift Security Group ID" + value = module.security_group.id +} + +output "security_group_arn" { + description = "Spacelift Security Group ARN" + value = module.security_group.arn +} + +output "security_group_name" { + description = "Spacelift Security Group Name" + value = module.security_group.name +} + +output "launch_template_id" { + description = "The ID of the launch template" + value = module.autoscale_group.launch_template_id +} + +output "launch_template_arn" { + description = "The ARN of the launch template" + value = module.autoscale_group.launch_template_arn +} + +output "autoscaling_group_id" { + description = "The autoscaling group id" + value = module.autoscale_group.autoscaling_group_id +} + +output "autoscaling_group_name" { + description = "The autoscaling group name" + value = module.autoscale_group.autoscaling_group_name +} + +output "autoscaling_group_arn" { + description = "The ARN for this AutoScaling Group" + value = module.autoscale_group.autoscaling_group_arn +} + +output "autoscaling_group_min_size" { + description = "The minimum size of the autoscale group" + value = module.autoscale_group.autoscaling_group_min_size +} + +output "autoscaling_group_max_size" { + description = "The maximum size of the autoscale group" + value = module.autoscale_group.autoscaling_group_max_size +} + +output "autoscaling_group_default_cooldown" { + description = "Time between a scaling activity and the succeeding scaling 
activity" + value = module.autoscale_group.autoscaling_group_default_cooldown +} + +output "autoscaling_group_health_check_grace_period" { + description = "Time after instance comes into service before checking health" + value = module.autoscale_group.autoscaling_group_health_check_grace_period +} + +output "autoscaling_group_health_check_type" { + description = "`EC2` or `ELB`. Controls how health checking is done" + value = module.autoscale_group.autoscaling_group_health_check_type +} + +output "iam_role_name" { + value = join("", aws_iam_role.default[*].name) + description = "Spacelift IAM Role name" +} + +output "iam_role_id" { + value = join("", aws_iam_role.default[*].unique_id) + description = "Spacelift IAM Role ID" +} + +output "iam_role_arn" { + value = join("", aws_iam_role.default[*].arn) + description = "Spacelift IAM Role ARN" +} diff --git a/modules/spacelift/worker-pool/provider-spacelift.tf b/modules/spacelift/worker-pool/provider-spacelift.tf new file mode 100644 index 000000000..9634cde90 --- /dev/null +++ b/modules/spacelift/worker-pool/provider-spacelift.tf @@ -0,0 +1,6 @@ +# This provider always validates its credentials, so we always pass api_key_id and api_key_secret +provider "spacelift" { + api_key_endpoint = var.spacelift_api_endpoint + api_key_id = data.aws_ssm_parameter.spacelift_key_id.value + api_key_secret = data.aws_ssm_parameter.spacelift_key_secret.value +} diff --git a/modules/spacelift/worker-pool/providers.tf b/modules/spacelift/worker-pool/providers.tf new file mode 100644 index 000000000..89ed50a98 --- /dev/null +++ b/modules/spacelift/worker-pool/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/spacelift/worker-pool/remote-state.tf b/modules/spacelift/worker-pool/remote-state.tf new file mode 100644 index 000000000..01e086622 --- /dev/null +++ b/modules/spacelift/worker-pool/remote-state.tf @@ -0,0 +1,44 @@ +module "account_map" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "account-map" + environment = try(coalesce(var.account_map_environment_name, module.this.environment), null) + stage = var.account_map_stage_name + tenant = try(coalesce(var.account_map_tenant_name, module.this.tenant), null) + + context = module.this.context +} + +module "ecr" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "ecr" + environment = try(coalesce(var.ecr_environment_name, module.this.environment), null) + stage = var.ecr_stage_name + tenant = try(coalesce(var.ecr_tenant_name, module.this.tenant), null) + + context = module.this.context +} + +module "vpc" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = "vpc" + + context = module.this.context +} + +module "spaces" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.spacelift_spaces_component_name + environment = try(coalesce(var.spacelift_spaces_environment_name, module.this.environment), null) + stage = try(coalesce(var.spacelift_spaces_stage_name, module.this.stage), null) + tenant = try(coalesce(var.spacelift_spaces_tenant_name, module.this.tenant), null) + + context = module.this.context +} diff --git a/modules/spacelift/worker-pool/templates/spacelift@.service b/modules/spacelift/worker-pool/templates/spacelift@.service new file mode 100644 index 000000000..4985def09 --- /dev/null +++ b/modules/spacelift/worker-pool/templates/spacelift@.service @@ -0,0 +1,13 @@ +[Unit] +Description=Spacelift agent %i + +[Service] +Type=simple +ExecStart=/bin/bash -c "/usr/bin/spacelift-launcher 1>>/var/log/spacelift/info.log 2>>/var/log/spacelift/error.log" +EnvironmentFile=/etc/spacelift/spacelift.env +Restart=always +RestartSec=20 +TimeoutStartSec=10 + +[Install] +WantedBy=multi-user.target diff --git a/modules/spacelift/worker-pool/templates/user-data.sh b/modules/spacelift/worker-pool/templates/user-data.sh new file mode 100644 index 000000000..7c4528b2f --- /dev/null +++ b/modules/spacelift/worker-pool/templates/user-data.sh @@ -0,0 +1,125 @@ +#!/bin/bash -e + +spacelift() { ( + set -e + + echo "Updating packages (security)" | tee -a /var/log/spacelift/info.log + yum update-minimal --security -y 1>>/var/log/spacelift/info.log 2>>/var/log/spacelift/error.log + + if ! 
which docker-credential-ecr-login; then + yum install -y amazon-ecr-credential-helper + fi + # Due to https://github.com/docker/cli/issues/2738 + # we need to create the config.json file for all users + for home in /root $(ls /home); do + mkdir -p $home/.docker + echo '{"credsStore": "ecr-login"}' >$home/.docker/config.json + done + docker pull ${spacelift_runner_image} + + %{ if github_netrc_enabled } + export GITHUB_TOKEN=$(aws ssm get-parameters --region=${region} --name ${github_netrc_ssm_path_token} --with-decryption --query "Parameters[0].Value" --output text) + export GITHUB_USER=$(aws ssm get-parameters --region=${region} --name ${github_netrc_ssm_path_user} --with-decryption --query "Parameters[0].Value" --output text) + + # Allows downloading terraform modules using a GitHub PAT + NETRC_FILE="/root/.netrc" + echo "Creating $NETRC_FILE" + printf "machine github.com\n" >"$NETRC_FILE" + printf "login %s\n" "$GITHUB_USER" >>"$NETRC_FILE" + printf "password %s\n" "$GITHUB_TOKEN" >>"$NETRC_FILE" + echo "Created $NETRC_FILE" + + # Converts ssh clones into https clones to take advantage of the GitHub PAT + ## NOTE: --system cannot be used as HOME is unset during the cloud-init userdata portion + ## so --file has to be passed in manually. + yum install git -y + GIT_CONFIG="/root/.gitconfig" + echo "Creating $GIT_CONFIG" + git config --file $GIT_CONFIG url."https://github.com/".insteadOf "git@github.com:" + git config --file $GIT_CONFIG url."https://github.com/".insteadOf "ssh://git@github.com/" --add + echo "Created $GIT_CONFIG" + yum remove git -y + + # Mount the .netrc and .gitconfig files into the container + export SPACELIFT_WORKER_EXTRA_MOUNTS=$NETRC_FILE:/conf/.netrc,$GIT_CONFIG:/conf/.gitconfig + %{ endif } + + %{ if infracost_enabled } + export INFRACOST_API_KEY=$(aws ssm get-parameters --region=${region} --name ${infracost_api_token_ssm_path} --with-decryption --query "Parameters[0].Value" --output text) + export INFRACOST_CLI_ARGS=${infracost_cli_args} + export INFRACOST_WARN_ON_FAILURE=${infracost_warn_on_failure} + %{ endif } + + export SPACELIFT_POOL_PRIVATE_KEY=${spacelift_worker_pool_private_key} + export SPACELIFT_TOKEN=${spacelift_worker_pool_config} + # This is a comma separated list of all the environment variables to read from the env file + export SPACELIFT_WHITELIST_ENVS=AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY,AWS_SESSION_TOKEN,AWS_SDK_LOAD_CONFIG,AWS_CONFIG_FILE,AWS_PROFILE,GITHUB_TOKEN,INFRACOST_API_KEY,ATMOS_BASE_PATH,TF_VAR_terraform_user + # This is a comma separated list of all the sensitive environment variables that will show up masked if printed during a run + export SPACELIFT_MASK_ENVS=AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY,AWS_SESSION_TOKEN,GITHUB_TOKEN,INFRACOST_API_KEY + export SPACELIFT_LAUNCHER_LOGS_TIMEOUT=30m + export SPACELIFT_LAUNCHER_RUN_TIMEOUT=120m + # These vars are prefixed with TMP_ so they do not conflict with AWS_ specific vars + export TMP_AWS_SDK_LOAD_CONFIG=true + export TMP_AWS_CONFIG_FILE=${aws_config_file} + export TMP_AWS_PROFILE=${aws_profile} + + echo "Turning on swap" | tee -a /var/log/spacelift/info.log + dd if=/dev/zero of=/swapfile bs=128M count=32 2>/var/log/spacelift/error.log + chmod 600 /swapfile 2>/var/log/spacelift/error.log + mkswap /swapfile 2>/var/log/spacelift/error.log + swapon /swapfile 2>/var/log/spacelift/error.log + swapon -s | tee -a /var/log/spacelift/info.log + + echo "Downloading Spacelift launcher" | tee -a /var/log/spacelift/info.log + curl https://downloads.${spacelift_domain_name}/spacelift-launcher 
--output /usr/bin/spacelift-launcher 2>>/var/log/spacelift/error.log + + echo "Making the Spacelift launcher executable" | tee -a /var/log/spacelift/info.log + chmod 755 /usr/bin/spacelift-launcher 2>>/var/log/spacelift/error.log + + echo "Retrieving EC2 instance ID" | tee -a /var/log/spacelift/info.log + export SPACELIFT_METADATA_instance_id=$(ec2-metadata --instance-id | cut -d ' ' -f2) + + echo "Retrieving EC2 ASG ID" | tee -a /var/log/spacelift/info.log + export SPACELIFT_METADATA_asg_id=$(aws autoscaling --region=${region} describe-auto-scaling-instances --instance-ids "$SPACELIFT_METADATA_instance_id" | jq -r '.AutoScalingInstances[0].AutoScalingGroupName') + + echo "Preparing Spacelift ENV variables" | tee -a /var/log/spacelift/info.log + env_file="/etc/spacelift/spacelift.env" + sudo mkdir -p "/etc/spacelift" + sudo touch "$env_file" + sudo chmod 744 "$env_file" + printf "SPACELIFT_POOL_PRIVATE_KEY=%s\n" "$SPACELIFT_POOL_PRIVATE_KEY" >"$env_file" + printf "SPACELIFT_TOKEN=%s\n" "$SPACELIFT_TOKEN" >>"$env_file" + printf "SPACELIFT_WHITELIST_ENVS=%s\n" "$SPACELIFT_WHITELIST_ENVS" >>"$env_file" + printf "SPACELIFT_MASK_ENVS=%s\n" "$SPACELIFT_MASK_ENVS" >>"$env_file" + printf "SPACELIFT_LAUNCHER_LOGS_TIMEOUT=%s\n" "$SPACELIFT_LAUNCHER_LOGS_TIMEOUT" >>"$env_file" + printf "SPACELIFT_LAUNCHER_RUN_TIMEOUT=%s\n" "$SPACELIFT_LAUNCHER_RUN_TIMEOUT" >>"$env_file" + printf "SPACELIFT_METADATA_instance_id=%s\n" "$SPACELIFT_METADATA_instance_id" >>"$env_file" + printf "SPACELIFT_METADATA_asg_id=%s\n" "$SPACELIFT_METADATA_asg_id" >>"$env_file" + printf "AWS_SDK_LOAD_CONFIG=%s\n" "$TMP_AWS_SDK_LOAD_CONFIG" >>"$env_file" + printf "AWS_CONFIG_FILE=%s\n" "$TMP_AWS_CONFIG_FILE" >>"$env_file" + printf "AWS_PROFILE=%s\n" "$TMP_AWS_PROFILE" >>"$env_file" + printf "ATMOS_BASE_PATH=%s\n" "/mnt/workspace/source" >>"$env_file" + printf "TF_VAR_terraform_user=%s\n" "spacelift" >>"$env_file" + [[ ! -z "$GITHUB_TOKEN" ]] && printf "GITHUB_TOKEN=%s\n" "$GITHUB_TOKEN" >>"$env_file" + [[ ! -z "$GITHUB_USER" ]] && printf "GITHUB_USER=%s\n" "$GITHUB_USER" >>"$env_file" + [[ ! -z "$SPACELIFT_WORKER_EXTRA_MOUNTS" ]] && printf "SPACELIFT_WORKER_EXTRA_MOUNTS=%s\n" "$SPACELIFT_WORKER_EXTRA_MOUNTS" >>"$env_file" + [[ ! -z "$INFRACOST_API_KEY" ]] && printf "INFRACOST_API_KEY=%s\n" "$INFRACOST_API_KEY" >>"$env_file" + + echo "Enabling Spacelift agent services" | tee -a /var/log/spacelift/info.log + sudo systemctl enable spacelift@{1..${spacelift_agents_per_node}}.service + + echo "Enabling Amazon SSM agent" | tee -a /var/log/spacelift/info.log + sudo systemctl enable amazon-ssm-agent + + echo "Reloading systemd daemon" | tee -a /var/log/spacelift/info.log + sudo systemctl daemon-reload + + echo "Starting Amazon SSM agent" | tee -a /var/log/spacelift/info.log + sudo systemctl start amazon-ssm-agent + + echo "Starting Spacelift agents" | tee -a /var/log/spacelift/info.log + sudo systemctl start spacelift@{1..${spacelift_agents_per_node}}.service + +); } + +spacelift diff --git a/modules/spacelift/worker-pool/variables.tf b/modules/spacelift/worker-pool/variables.tf new file mode 100644 index 000000000..4ebd092c0 --- /dev/null +++ b/modules/spacelift/worker-pool/variables.tf @@ -0,0 +1,342 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "mixed_instances_policy" { + description = "Policy to use a mixed group of on-demand/spot of different types. Launch template is automatically generated. 
https://www.terraform.io/docs/providers/aws/r/autoscaling_group.html#mixed_instances_policy-1" + + type = object({ + instances_distribution = object({ + on_demand_allocation_strategy = string + on_demand_base_capacity = number + on_demand_percentage_above_base_capacity = number + spot_allocation_strategy = string + spot_instance_pools = number + spot_max_price = string + }) + override = list(object({ + instance_type = string + weighted_capacity = number + })) + }) + default = null +} + +variable "ebs_optimized" { + type = bool + description = "If true, the launched EC2 instance will be EBS-optimized" + default = false +} + +variable "max_size" { + type = number + description = "The maximum size of the autoscale group" +} + +variable "min_size" { + type = number + description = "The minimum size of the autoscale group" +} + +variable "desired_capacity" { + type = number + description = "The number of Amazon EC2 instances that should be running in the group, if not set will use `min_size` as value" + default = null +} + +variable "wait_for_capacity_timeout" { + type = string + description = "A maximum duration that Terraform should wait for ASG instances to be healthy before timing out. (See also Waiting for Capacity below.) Setting this to '0' causes Terraform to skip all Capacity Waiting behavior" +} + +variable "cpu_utilization_high_threshold_percent" { + type = number + description = "CPU utilization high threshold" +} + +variable "cpu_utilization_low_threshold_percent" { + type = number + description = "CPU utilization low threshold" +} + +variable "instance_lifetime" { + type = number + default = 1209600 + description = "Number of seconds after which the instance will be terminated. The default is set to 14 days." +} + +variable "default_cooldown" { + type = number + description = "The amount of time, in seconds, after a scaling activity completes before another scaling activity can start" + default = 300 +} + +variable "scale_down_cooldown_seconds" { + type = number + default = 300 + description = "The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start" +} + +variable "health_check_type" { + type = string + description = "Controls how health checking is done. Valid values are `EC2` or `ELB`" + default = "EC2" +} + +variable "health_check_grace_period" { + type = number + description = "Time (in seconds) after instance comes into service before checking health" + default = 300 +} + +variable "termination_policies" { + description = "A list of policies to decide how the instances in the auto scale group should be terminated. 
The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `Default`" + type = list(string) + default = ["OldestLaunchConfiguration"] +} + +variable "block_device_mappings" { + description = "Specify volumes to attach to the instance besides the volumes specified by the AMI" + + type = list(object({ + device_name = string + no_device = bool + virtual_name = string + ebs = object({ + delete_on_termination = bool + encrypted = bool + iops = number + kms_key_id = string + snapshot_id = string + volume_size = number + volume_type = string + }) + })) + + default = [] +} + +variable "account_map_environment_name" { + type = string + description = "The name of the environment where `account_map` is provisioned" + default = "gbl" +} + +variable "account_map_stage_name" { + type = string + description = "The name of the stage where `account_map` is provisioned" + default = "root" +} + +variable "account_map_tenant_name" { + type = string + description = <<-EOT + The name of the tenant where `account_map` is provisioned. + + If the `tenant` label is not used, leave this as `null`. + EOT + default = null +} + +variable "ecr_environment_name" { + type = string + description = "The name of the environment where `ecr` is provisioned" + default = "" +} + +variable "ecr_stage_name" { + type = string + description = "The name of the stage where `ecr` is provisioned" + default = "artifacts" +} + +variable "ecr_tenant_name" { + type = string + description = <<-EOT + The name of the tenant where `ecr` is provisioned. + + If the `tenant` label is not used, leave this as `null`. + EOT + default = null +} + + +variable "ecr_region" { + type = string + description = "AWS region that contains the ECR infrastructure repo" + default = "" +} + +variable "ecr_repo_name" { + type = string + description = "ECR repository name" +} + +variable "instance_type" { + type = string + description = "EC2 instance type to use for workers" + default = "r5n.large" +} + +variable "spacelift_runner_image" { + type = string + description = "URL of ECR image to use for Spacelift" + default = "" +} + +variable "spacelift_api_endpoint" { + type = string + description = "The Spacelift API endpoint URL (e.g. https://example.app.spacelift.io)" +} + +variable "spacelift_ami_id" { + type = string + description = "AMI ID of Spacelift worker pool image" + default = null +} + +variable "custom_spacelift_ami" { + type = bool + description = "Custom spacelift AMI" + default = false +} + +variable "architecture" { + type = list(string) + description = "OS architecture of the EC2 instance AMI" + default = ["x86_64"] +} + +variable "spacelift_domain_name" { + type = string + description = "Top-level domain name to use for pulling the launcher binary" + default = "spacelift.io" +} + +variable "iam_attributes" { + type = list(string) + description = "Additional attributes to add to the IDs of the IAM role and policy" + default = [] +} + +variable "launch_template_version" { + type = string + description = "Launch template version to use for workers. Note that instance refresh settings are IGNORED unless template version is empty" + default = "$Latest" +} + +variable "instance_refresh" { + description = "The instance refresh definition. 
If this block is configured, an Instance Refresh will be started when the Auto Scaling Group is updated" + type = object({ + strategy = string + preferences = object({ + instance_warmup = optional(number, null) + min_healthy_percentage = optional(number, null) + skip_matching = optional(bool, null) + auto_rollback = optional(bool, null) + }) + triggers = optional(list(string), []) + }) + + default = null +} + +variable "github_netrc_enabled" { + type = bool + description = "Whether to create a GitHub .netrc file so Spacelift can clone private GitHub repositories." + default = false +} + +variable "github_netrc_ssm_path_token" { + type = string + description = "If `github_netrc` is enabled, this is the SSM path to retrieve the GitHub token." + default = "/github/token" +} + +variable "github_netrc_ssm_path_user" { + type = string + description = "If `github_netrc` is enabled, this is the SSM path to retrieve the GitHub user" + default = "/github/user" +} + +variable "infracost_enabled" { + type = bool + description = "Whether to enable infracost for Spacelift stacks" + default = false +} + +variable "infracost_api_token_ssm_path" { + type = string + description = "This is the SSM path to retrieve and set the INFRACOST_API_TOKEN environment variable" + default = "/infracost/token" +} + +variable "infracost_cli_args" { + type = string + description = "These are the CLI args passed to infracost" + default = "" +} + +variable "infracost_warn_on_failure" { + type = bool + description = "A failure executing Infracost, or a non-zero exit code being returned from the command will cause runs to fail. If this is true, this will only warn instead of failing the stack." + default = true +} + +variable "aws_config_file" { + type = string + description = "The AWS_CONFIG_FILE used by the worker. Can be overridden by `/.spacelift/config.yml`." + default = "/etc/aws-config/aws-config-spacelift" +} + +variable "aws_profile" { + type = string + description = <<-EOT + The AWS_PROFILE used by the worker. If not specified, `"$${var.namespace}-identity"` will be used. + Can be overridden by `/.spacelift/config.yml`. + EOT + default = null +} + +variable "spacelift_agents_per_node" { + type = number + description = "Number of Spacelift agents to run on one worker node. NOTE: This affects billable units. Spacelift charges per agent." 
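+  # Each agent runs as its own spacelift@<N> systemd unit; templates/user-data.sh enables spacelift@{1..N}.service to match this count.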
+ default = 1 +} + +variable "spacelift_aws_account_id" { + type = string + description = "AWS Account ID owned by Spacelift" + default = "643313122712" +} + +variable "space_name" { + type = string + description = "The name of the Space to create the worker pool in" + default = "root" +} + +variable "spacelift_spaces_component_name" { + type = string + description = "The name of the spacelift spaces component" + default = "spacelift/spaces" +} + +variable "spacelift_spaces_environment_name" { + type = string + description = "The environment name of the spacelift spaces component" + default = null +} + +variable "spacelift_spaces_stage_name" { + type = string + description = "The stage name of the spacelift spaces component" + default = null +} + +variable "spacelift_spaces_tenant_name" { + type = string + description = "The tenant name of the spacelift spaces component" + default = null +} diff --git a/modules/spacelift/worker-pool/versions.tf b/modules/spacelift/worker-pool/versions.tf new file mode 100644 index 000000000..1b4e6e5f2 --- /dev/null +++ b/modules/spacelift/worker-pool/versions.tf @@ -0,0 +1,18 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + spacelift = { + source = "spacelift-io/spacelift" + version = ">= 0.1.2" + } + aws = { + source = "hashicorp/aws" + version = ">= 4.9.0" + } + cloudinit = { + source = "hashicorp/cloudinit" + version = ">= 2.2.0" + } + } +} diff --git a/modules/sqs-queue/CHANGELOG.md b/modules/sqs-queue/CHANGELOG.md new file mode 100644 index 000000000..9bc0c026d --- /dev/null +++ b/modules/sqs-queue/CHANGELOG.md @@ -0,0 +1,23 @@ +## Pull Request [#1042](https://github.com/cloudposse/terraform-aws-components/pull/1042) - Refactor `sqs-queue` Component + +Components PR [#1042](https://github.com/cloudposse/terraform-aws-components/pull/1042) + +### Affected Components + +- `sqs-queue` + +### Summary + +This change to the sqs-queue component, [#1042](https://github.com/cloudposse/terraform-aws-components/pull/1042), +refactored the `sqs-queue` component to use the AWS Module for queues, this provides better support for Dead-Letter +Queues and easy policy attachment. + +As part of that change, we've changed some variables: + +- `policy` - **Removed** +- `redrive_policy` - **Removed** +- `dead_letter_sqs_arn` - **Removed** +- `dead_letter_component_name` - **Removed** +- `dead_letter_max_receive_count` - Renamed to `dlq_max_receive_count` +- `fifo_throughput_limit` **type changed** from `list(string)` to type `string` +- `kms_master_key_id` **type changed** from `list(string)` to type `string` diff --git a/modules/sqs-queue/README.md b/modules/sqs-queue/README.md index 60db805c8..ee5b7a583 100644 --- a/modules/sqs-queue/README.md +++ b/modules/sqs-queue/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/sqs-queue + - layer/addons + - provider/aws +--- + # Component: `sqs-queue` This component is responsible for creating an SQS queue. @@ -11,37 +18,86 @@ Here's an example snippet for how to use this component. 
```yaml components: terraform: - sqs-queue: - settings: - spacelift: - workspace_enabled: true + sqs-queue/defaults: vars: enabled: true + # org defaults + + sqs-queue: + metadata: + component: sqs-queue + inherits: + - sqs-queue/defaults + vars: + name: sqs + visibility_timeout_seconds: 30 + message_retention_seconds: 86400 # 1 day + delay_seconds: 0 + max_message_size_bytes: 262144 + receive_wait_time_seconds: 0 + fifo_queue: false + content_based_deduplication: false + dlq_enabled: true + dlq_name_suffix: "dead-letter" # default is dlq + dlq_max_receive_count: 1 + dlq_kms_data_key_reuse_period_seconds: 86400 # 1 day + kms_data_key_reuse_period_seconds: 86400 # 1 day + # kms_master_key_id: "alias/aws/sqs" # Use KMS # default null + sqs_managed_sse_enabled: true # SSE vs KMS (Priority goes to KMS) + iam_policy_limit_to_current_account: true # default true + iam_policy: + - version: 2012-10-17 + policy_id: Allow-S3-Event-Notifications + statements: + - sid: Allow-S3-Event-Notifications + effect: Allow + principals: + - type: Service + identifiers: ["s3.amazonaws.com"] + actions: + - SQS:SendMessage + resources: [] # auto includes this queue's ARN + conditions: + ## this is included when `iam_policy_limit_to_current_account` is true + #- test: StringEquals + # variable: aws:SourceAccount + # value: "1234567890" + - test: ArnLike + variable: aws:SourceArn + values: + - "arn:aws:s3:::*" ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.0 | ## Providers -No providers. +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.0 | ## Modules | Name | Source | Version | |------|--------|---------| | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [sqs\_queue](#module\_sqs\_queue) | ./modules/terraform-aws-sqs-queue | n/a | +| [queue\_policy](#module\_queue\_policy) | cloudposse/iam-policy/aws | 2.0.1 | +| [sqs](#module\_sqs) | terraform-aws-modules/sqs/aws | 4.2.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources -No resources. +| Name | Type | +|------|------| +| [aws_sqs_queue_policy.sqs_queue_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue_policy) | resource | +| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | ## Inputs @@ -51,19 +107,34 @@ No resources. | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | | [content\_based\_deduplication](#input\_content\_based\_deduplication) | Enables content-based deduplication for FIFO queues. For more information, see the [related documentation](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) | `bool` | `false` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | -| [deduplication\_scope](#input\_deduplication\_scope) | Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue. This can be specified if fifo\_queue is true. | `list(string)` | `[]` | no | +| [create\_dlq\_redrive\_allow\_policy](#input\_create\_dlq\_redrive\_allow\_policy) | Determines whether to create a redrive allow policy for the dead letter queue. | `bool` | `true` | no | +| [deduplication\_scope](#input\_deduplication\_scope) | Specifies whether message deduplication occurs at the message group or queue level | `string` | `null` | no | | [delay\_seconds](#input\_delay\_seconds) | The time in seconds that the delivery of all messages in the queue will be delayed. An integer from 0 to 900 (15 minutes). The default for this attribute is 0 seconds. | `number` | `0` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | +| [dlq\_content\_based\_deduplication](#input\_dlq\_content\_based\_deduplication) | Enables content-based deduplication for FIFO queues | `bool` | `null` | no | +| [dlq\_deduplication\_scope](#input\_dlq\_deduplication\_scope) | Specifies whether message deduplication occurs at the message group or queue level | `string` | `null` | no | +| [dlq\_delay\_seconds](#input\_dlq\_delay\_seconds) | The time in seconds that the delivery of all messages in the queue will be delayed. An integer from 0 to 900 (15 minutes) | `number` | `null` | no | +| [dlq\_enabled](#input\_dlq\_enabled) | Boolean designating whether the Dead Letter Queue should be created by this component. | `bool` | `false` | no | +| [dlq\_kms\_data\_key\_reuse\_period\_seconds](#input\_dlq\_kms\_data\_key\_reuse\_period\_seconds) | The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours) | `number` | `null` | no | +| [dlq\_kms\_master\_key\_id](#input\_dlq\_kms\_master\_key\_id) | The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK | `string` | `null` | no | +| [dlq\_max\_receive\_count](#input\_dlq\_max\_receive\_count) | The number of times a message can be unsuccessfully dequeued before being moved to the Dead Letter Queue. | `number` | `5` | no | +| [dlq\_message\_retention\_seconds](#input\_dlq\_message\_retention\_seconds) | The number of seconds Amazon SQS retains a message. Integer representing seconds, from 60 (1 minute) to 1209600 (14 days) | `number` | `null` | no | +| [dlq\_name\_suffix](#input\_dlq\_name\_suffix) | The suffix of the Dead Letter Queue. | `string` | `"dlq"` | no | +| [dlq\_receive\_wait\_time\_seconds](#input\_dlq\_receive\_wait\_time\_seconds) | The time for which a ReceiveMessage call will wait for a message to arrive (long polling) before returning. An integer from 0 to 20 (seconds) | `number` | `null` | no | +| [dlq\_redrive\_allow\_policy](#input\_dlq\_redrive\_allow\_policy) | The JSON policy to set up the Dead Letter Queue redrive permission, see AWS docs. | `any` | `{}` | no | +| [dlq\_sqs\_managed\_sse\_enabled](#input\_dlq\_sqs\_managed\_sse\_enabled) | Boolean to enable server-side encryption (SSE) of message content with SQS-owned encryption keys | `bool` | `true` | no | +| [dlq\_tags](#input\_dlq\_tags) | A mapping of additional tags to assign to the dead letter queue | `map(string)` | `{}` | no | +| [dlq\_visibility\_timeout\_seconds](#input\_dlq\_visibility\_timeout\_seconds) | The visibility timeout for the queue. An integer from 0 to 43200 (12 hours) | `number` | `null` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [fifo\_queue](#input\_fifo\_queue) | Boolean designating a FIFO queue. If not set, it defaults to false making it standard. | `bool` | `false` | no | -| [fifo\_throughput\_limit](#input\_fifo\_throughput\_limit) | Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. This can be specified if fifo\_queue is true. 
| `list(string)` | `[]` | no | +| [fifo\_throughput\_limit](#input\_fifo\_throughput\_limit) | Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. This can be specified if fifo\_queue is true. | `string` | `null` | no | +| [iam\_policy](#input\_iam\_policy) | IAM policy as list of Terraform objects, compatible with Terraform `aws_iam_policy_document` data source
except that `source_policy_documents` and `override_policy_documents` are not included.
Use inputs `iam_source_policy_documents` and `iam_override_policy_documents` for that. |
list(object({
policy_id = optional(string, null)
version = optional(string, null)
statements = list(object({
sid = optional(string, null)
effect = optional(string, null)
actions = optional(list(string), null)
not_actions = optional(list(string), null)
resources = optional(list(string), null)
not_resources = optional(list(string), null)
conditions = optional(list(object({
test = string
variable = string
values = list(string)
})), [])
principals = optional(list(object({
type = string
identifiers = list(string)
})), [])
not_principals = optional(list(object({
type = string
identifiers = list(string)
})), [])
}))
}))
| `[]` | no | +| [iam\_policy\_limit\_to\_current\_account](#input\_iam\_policy\_limit\_to\_current\_account) | Boolean designating whether the IAM policy should be limited to the current account. | `bool` | `true` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kms\_data\_key\_reuse\_period\_seconds](#input\_kms\_data\_key\_reuse\_period\_seconds) | The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). The default is 300 (5 minutes). | `number` | `300` | no | -| [kms\_master\_key\_id](#input\_kms\_master\_key\_id) | The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. | `list(string)` |
[
"alias/aws/sqs"
]
| no | +| [kms\_master\_key\_id](#input\_kms\_master\_key\_id) | The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -72,11 +143,10 @@ No resources. | [message\_retention\_seconds](#input\_message\_retention\_seconds) | The number of seconds Amazon SQS retains a message. Integer representing seconds, from 60 (1 minute) to 1209600 (14 days). The default for this attribute is 345600 (4 days). | `number` | `345600` | no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | -| [policy](#input\_policy) | The JSON policy for the SQS queue. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). | `list(string)` | `[]` | no | | [receive\_wait\_time\_seconds](#input\_receive\_wait\_time\_seconds) | The time for which a ReceiveMessage call will wait for a message to arrive (long polling) before returning. An integer from 0 to 20 (seconds). The default for this attribute is 0, meaning that the call will return immediately. | `number` | `0` | no | -| [redrive\_policy](#input\_redrive\_policy) | The JSON policy to set up the Dead Letter Queue, see AWS docs. Note: when specifying maxReceiveCount, you must specify it as an integer (5), and not a string ("5"). | `list(string)` | `[]` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [sqs\_managed\_sse\_enabled](#input\_sqs\_managed\_sse\_enabled) | Boolean to enable server-side encryption (SSE) of message content with SQS-owned encryption keys | `bool` | `true` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | @@ -86,13 +156,13 @@ No resources. | Name | Description | |------|-------------| -| [arn](#output\_arn) | The ARN of the created Amazon SQS queue | -| [id](#output\_id) | The ID of the created Amazon SQS queue. Same as the URL. | -| [name](#output\_name) | The name of the created Amazon SQS queue. | -| [url](#output\_url) | The URL of the created Amazon SQS queue. | +| [sqs\_queue](#output\_sqs\_queue) | The SQS queue. | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/sqs-queue) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/sqs-queue) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/sqs-queue/default.auto.tfvars b/modules/sqs-queue/default.auto.tfvars deleted file mode 100644 index 31d32a5b7..000000000 --- a/modules/sqs-queue/default.auto.tfvars +++ /dev/null @@ -1,2 +0,0 @@ -# This file is included by default in terraform plans -enabled = false diff --git a/modules/sqs-queue/main.tf b/modules/sqs-queue/main.tf index 8d350131a..31e81bd6e 100644 --- a/modules/sqs-queue/main.tf +++ b/modules/sqs-queue/main.tf @@ -1,23 +1,91 @@ locals { - enabled = module.this.enabled + enabled = module.this.enabled + aws_account_number = one(data.aws_caller_identity.current[*].account_id) + policy_enabled = local.enabled && length(var.iam_policy) > 0 } -module "sqs_queue" { - source = "./modules/terraform-aws-sqs-queue" +module "sqs" { + source = "terraform-aws-modules/sqs/aws" + version = "4.2.0" + + name = module.this.id + + create_dlq = var.dlq_enabled + dlq_name = "${module.this.id}-${var.dlq_name_suffix}" + dlq_content_based_deduplication = var.dlq_content_based_deduplication + dlq_deduplication_scope = var.dlq_deduplication_scope + dlq_kms_master_key_id = var.dlq_kms_master_key_id + dlq_delay_seconds = var.dlq_delay_seconds + dlq_kms_data_key_reuse_period_seconds = var.dlq_kms_data_key_reuse_period_seconds + dlq_message_retention_seconds = var.dlq_message_retention_seconds + dlq_receive_wait_time_seconds = var.dlq_receive_wait_time_seconds + create_dlq_redrive_allow_policy = var.create_dlq_redrive_allow_policy + dlq_redrive_allow_policy = var.dlq_redrive_allow_policy + dlq_sqs_managed_sse_enabled = var.dlq_sqs_managed_sse_enabled + dlq_visibility_timeout_seconds = var.dlq_visibility_timeout_seconds + dlq_tags = merge(module.this.tags, var.dlq_tags) + redrive_policy = var.dlq_enabled ? 
{ + maxReceiveCount = var.dlq_max_receive_count + } : {} visibility_timeout_seconds = var.visibility_timeout_seconds message_retention_seconds = var.message_retention_seconds - max_message_size = var.max_message_size delay_seconds = var.delay_seconds receive_wait_time_seconds = var.receive_wait_time_seconds - policy = try([var.policy[0]], []) - redrive_policy = try([var.redrive_policy[0]], []) + max_message_size = var.max_message_size fifo_queue = var.fifo_queue - fifo_throughput_limit = try([var.fifo_throughput_limit[0]], []) content_based_deduplication = var.content_based_deduplication - kms_master_key_id = try([var.kms_master_key_id[0]], []) + kms_master_key_id = var.kms_master_key_id kms_data_key_reuse_period_seconds = var.kms_data_key_reuse_period_seconds - deduplication_scope = try([var.deduplication_scope[0]], []) + sqs_managed_sse_enabled = var.sqs_managed_sse_enabled + fifo_throughput_limit = var.fifo_throughput_limit + deduplication_scope = var.deduplication_scope + + tags = module.this.tags +} + +data "aws_caller_identity" "current" { + count = local.enabled ? 1 : 0 +} + +module "queue_policy" { + count = local.policy_enabled ? 1 : 0 + + source = "cloudposse/iam-policy/aws" + version = "2.0.1" + + iam_policy = [ + for policy in var.iam_policy : { + policy_id = policy.policy_id + version = policy.version + + statements = [ + for statement in policy.statements : + merge( + statement, + { + resources = [module.sqs.queue_arn] + }, + var.iam_policy_limit_to_current_account ? { + conditions = concat(statement.conditions, [ + { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [local.aws_account_number] + } + ]) + } : {} + ) + ] + } + ] context = module.this.context } + +resource "aws_sqs_queue_policy" "sqs_queue_policy" { + count = local.policy_enabled ? 1 : 0 + + queue_url = module.sqs.queue_url + policy = one(module.queue_policy[*].json) +} diff --git a/modules/sqs-queue/modules/terraform-aws-sqs-queue/main.tf b/modules/sqs-queue/modules/terraform-aws-sqs-queue/main.tf deleted file mode 100644 index f49930c76..000000000 --- a/modules/sqs-queue/modules/terraform-aws-sqs-queue/main.tf +++ /dev/null @@ -1,24 +0,0 @@ -locals { - enabled = module.this.enabled -} - -resource "aws_sqs_queue" "default" { - count = local.enabled ? 1 : 0 - - name = var.fifo_queue ? "${module.this.id}.fifo" : module.this.id - visibility_timeout_seconds = var.visibility_timeout_seconds - message_retention_seconds = var.message_retention_seconds - max_message_size = var.max_message_size - delay_seconds = var.delay_seconds - receive_wait_time_seconds = var.receive_wait_time_seconds - policy = try(var.policy[0], null) - redrive_policy = try(var.redrive_policy[0], null) - fifo_queue = var.fifo_queue - fifo_throughput_limit = try(var.fifo_throughput_limit[0], null) - content_based_deduplication = var.content_based_deduplication - kms_master_key_id = try(var.kms_master_key_id[0], null) - kms_data_key_reuse_period_seconds = var.kms_data_key_reuse_period_seconds - deduplication_scope = try(var.deduplication_scope[0], null) - - tags = module.this.tags -} diff --git a/modules/sqs-queue/modules/terraform-aws-sqs-queue/outputs.tf b/modules/sqs-queue/modules/terraform-aws-sqs-queue/outputs.tf deleted file mode 100644 index 3aa19bae5..000000000 --- a/modules/sqs-queue/modules/terraform-aws-sqs-queue/outputs.tf +++ /dev/null @@ -1,19 +0,0 @@ -output "url" { - description = "The URL of the created Amazon SQS queue." - value = local.enabled ? 
aws_sqs_queue.default[0].url : null -} - -output "id" { - description = "The ID of the created Amazon SQS queue. Same as the URL." - value = local.enabled ? aws_sqs_queue.default[0].id : null -} - -output "name" { - description = "The name of the created Amazon SQS queue." - value = local.enabled ? module.this.id : null -} - -output "arn" { - description = "The ARN of the created Amazon SQS queue." - value = local.enabled ? aws_sqs_queue.default[0].arn : null -} diff --git a/modules/sqs-queue/modules/terraform-aws-sqs-queue/variables.tf b/modules/sqs-queue/modules/terraform-aws-sqs-queue/variables.tf deleted file mode 100644 index 2ba38d026..000000000 --- a/modules/sqs-queue/modules/terraform-aws-sqs-queue/variables.tf +++ /dev/null @@ -1,125 +0,0 @@ -variable "visibility_timeout_seconds" { - type = number - description = "The visibility timeout for the queue. An integer from 0 to 43200 (12 hours). The default for this attribute is 30. For more information about visibility timeout, see AWS docs." - default = 30 - validation { - condition = ( - var.visibility_timeout_seconds >= 0 && var.visibility_timeout_seconds <= 43200 - ) - error_message = "Var must be between 0 and 43200." - } -} - -variable "message_retention_seconds" { - type = number - description = "The number of seconds Amazon SQS retains a message. Integer representing seconds, from 60 (1 minute) to 1209600 (14 days). The default for this attribute is 345600 (4 days)." - default = 345600 - validation { - condition = ( - var.message_retention_seconds >= 60 && var.message_retention_seconds <= 1209600 - ) - error_message = "Var must be between 60 and 1209600." - } -} - -variable "max_message_size" { - type = number - description = "The limit of how many bytes a message can contain before Amazon SQS rejects it. An integer from 1024 bytes (1 KiB) up to 262144 bytes (256 KiB). The default for this attribute is 262144 (256 KiB)." - default = 262144 - validation { - condition = ( - var.max_message_size >= 1024 && var.max_message_size <= 262144 - ) - error_message = "Var must be between 1024 and 262144." - } -} - -variable "delay_seconds" { - type = number - description = "The time in seconds that the delivery of all messages in the queue will be delayed. An integer from 0 to 900 (15 minutes). The default for this attribute is 0 seconds." - default = 0 - validation { - condition = ( - var.delay_seconds >= 0 && var.delay_seconds <= 900 - ) - error_message = "Var must be between 0 and 900." - } -} - -variable "receive_wait_time_seconds" { - type = number - description = "The time for which a ReceiveMessage call will wait for a message to arrive (long polling) before returning. An integer from 0 to 20 (seconds). The default for this attribute is 0, meaning that the call will return immediately." - default = 0 - validation { - condition = ( - var.receive_wait_time_seconds >= 0 && var.receive_wait_time_seconds <= 20 - ) - error_message = "Var must be between 0 and 20." - } -} - -variable "policy" { - type = list(string) - description = "This is a list of 0 or 1. The JSON policy for the SQS queue. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy)." - default = [] -} - -variable "redrive_policy" { - type = list(string) - description = "This is a list of 0 or 1. The JSON policy to set up the Dead Letter Queue, see AWS docs. Note: when specifying maxReceiveCount, you must specify it as an integer (5), and not a string (\"5\")." 
- default = [] -} - -variable "fifo_queue" { - type = bool - description = "Boolean designating a FIFO queue. If not set, it defaults to false making it standard." - default = false -} - -variable "fifo_throughput_limit" { - type = list(string) - description = "This is a list of 0 or 1. Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. This can be specified if fifo_queue is true." - default = [] - validation { - condition = ( - length(var.fifo_throughput_limit) > 0 ? contains(["perQueue", "perMessageGroupId"], var.fifo_throughput_limit[0]) : true - ) - error_message = "Var must be one of \"perQueue\", \"perMessageGroupId\"." - } -} - -variable "content_based_deduplication" { - type = bool - description = "Enables content-based deduplication for FIFO queues. For more information, see the [related documentation](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing)" - default = false -} - -variable "kms_master_key_id" { - type = list(string) - description = "This is a list of 0 or 1. The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see [Key Terms](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms)." - default = ["alias/aws/sqs"] -} - -variable "kms_data_key_reuse_period_seconds" { - type = number - description = "The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). The default is 300 (5 minutes)." - default = 300 - validation { - condition = ( - var.kms_data_key_reuse_period_seconds >= 60 && var.kms_data_key_reuse_period_seconds <= 86400 - ) - error_message = "Var must be between 60 and 86400." - } -} - -variable "deduplication_scope" { - type = list(string) - description = "This is a list of 0 or 1. Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue. This can be specified if fifo_queue is true." - default = [] - validation { - condition = ( - length(var.deduplication_scope) > 0 ? contains(["messageGroup", "queue"], var.deduplication_scope[0]) : true - ) - error_message = "Var must be one of \"messageGroup\", \"queue\"." - } -} diff --git a/modules/sqs-queue/outputs.tf b/modules/sqs-queue/outputs.tf index 6126209db..ef290e84d 100644 --- a/modules/sqs-queue/outputs.tf +++ b/modules/sqs-queue/outputs.tf @@ -1,19 +1,4 @@ -output "url" { - description = "The URL of the created Amazon SQS queue." - value = module.sqs_queue.url -} - -output "id" { - description = "The ID of the created Amazon SQS queue. Same as the URL." - value = module.sqs_queue.id -} - -output "name" { - description = "The name of the created Amazon SQS queue." - value = module.sqs_queue.name -} - -output "arn" { - description = "The ARN of the created Amazon SQS queue" - value = module.sqs_queue.arn +output "sqs_queue" { + description = "The SQS queue." + value = module.sqs } diff --git a/modules/sqs-queue/providers.tf b/modules/sqs-queue/providers.tf index efa9ede5d..ef923e10a 100644 --- a/modules/sqs-queue/providers.tf +++ b/modules/sqs-queue/providers.tf @@ -1,11 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? 
coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -14,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/sqs-queue/variables.tf b/modules/sqs-queue/variables.tf index 0d5cf7c01..48f938e2e 100644 --- a/modules/sqs-queue/variables.tf +++ b/modules/sqs-queue/variables.tf @@ -33,16 +33,94 @@ variable "receive_wait_time_seconds" { default = 0 } -variable "policy" { - type = list(string) - description = "The JSON policy for the SQS queue. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy)." - default = [] +variable "dlq_enabled" { + type = bool + description = "Boolean designating whether the Dead Letter Queue should be created by this component." + default = false } -variable "redrive_policy" { - type = list(string) - description = "The JSON policy to set up the Dead Letter Queue, see AWS docs. Note: when specifying maxReceiveCount, you must specify it as an integer (5), and not a string (\"5\")." - default = [] +variable "dlq_name_suffix" { + type = string + description = "The suffix of the Dead Letter Queue." + default = "dlq" +} + +variable "dlq_max_receive_count" { + type = number + description = "The number of times a message can be unsuccessfully dequeued before being moved to the Dead Letter Queue." + default = 5 +} + +variable "dlq_content_based_deduplication" { + description = "Enables content-based deduplication for FIFO queues" + type = bool + default = null +} + +variable "dlq_deduplication_scope" { + description = "Specifies whether message deduplication occurs at the message group or queue level" + type = string + default = null +} + +variable "dlq_delay_seconds" { + description = "The time in seconds that the delivery of all messages in the queue will be delayed. An integer from 0 to 900 (15 minutes)" + type = number + default = null +} + +variable "dlq_kms_data_key_reuse_period_seconds" { + description = "The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours)" + type = number + default = null +} + +variable "dlq_kms_master_key_id" { + description = "The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK" + type = string + default = null +} + +variable "dlq_message_retention_seconds" { + description = "The number of seconds Amazon SQS retains a message. 
Integer representing seconds, from 60 (1 minute) to 1209600 (14 days)" + type = number + default = null +} + +variable "dlq_receive_wait_time_seconds" { + description = "The time for which a ReceiveMessage call will wait for a message to arrive (long polling) before returning. An integer from 0 to 20 (seconds)" + type = number + default = null +} + +variable "create_dlq_redrive_allow_policy" { + description = "Determines whether to create a redrive allow policy for the dead letter queue." + type = bool + default = true +} + +variable "dlq_redrive_allow_policy" { + description = "The JSON policy to set up the Dead Letter Queue redrive permission, see AWS docs." + type = any + default = {} +} + +variable "dlq_sqs_managed_sse_enabled" { + description = "Boolean to enable server-side encryption (SSE) of message content with SQS-owned encryption keys" + type = bool + default = true +} + +variable "dlq_visibility_timeout_seconds" { + description = "The visibility timeout for the queue. An integer from 0 to 43200 (12 hours)" + type = number + default = null +} + +variable "dlq_tags" { + description = "A mapping of additional tags to assign to the dead letter queue" + type = map(string) + default = {} } variable "fifo_queue" { @@ -52,9 +130,9 @@ variable "fifo_queue" { } variable "fifo_throughput_limit" { - type = list(string) + type = string description = "Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. This can be specified if fifo_queue is true." - default = [] + default = null } variable "content_based_deduplication" { @@ -64,9 +142,9 @@ variable "content_based_deduplication" { } variable "kms_master_key_id" { - type = list(string) + type = string description = "The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms." - default = ["alias/aws/sqs"] + default = null } variable "kms_data_key_reuse_period_seconds" { @@ -75,8 +153,55 @@ variable "kms_data_key_reuse_period_seconds" { default = 300 } +variable "iam_policy_limit_to_current_account" { + type = bool + description = "Boolean designating whether the IAM policy should be limited to the current account." + default = true +} + variable "deduplication_scope" { - type = list(string) - description = "Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue. This can be specified if fifo_queue is true." 
+ description = "Specifies whether message deduplication occurs at the message group or queue level" + type = string + default = null +} + +variable "sqs_managed_sse_enabled" { + description = "Boolean to enable server-side encryption (SSE) of message content with SQS-owned encryption keys" + type = bool + default = true +} + +variable "iam_policy" { + type = list(object({ + policy_id = optional(string, null) + version = optional(string, null) + statements = list(object({ + sid = optional(string, null) + effect = optional(string, null) + actions = optional(list(string), null) + not_actions = optional(list(string), null) + resources = optional(list(string), null) + not_resources = optional(list(string), null) + conditions = optional(list(object({ + test = string + variable = string + values = list(string) + })), []) + principals = optional(list(object({ + type = string + identifiers = list(string) + })), []) + not_principals = optional(list(object({ + type = string + identifiers = list(string) + })), []) + })) + })) + description = <<-EOT + IAM policy as list of Terraform objects, compatible with Terraform `aws_iam_policy_document` data source + except that `source_policy_documents` and `override_policy_documents` are not included. + Use inputs `iam_source_policy_documents` and `iam_override_policy_documents` for that. + EOT default = [] + nullable = false } diff --git a/modules/sqs-queue/versions.tf b/modules/sqs-queue/versions.tf index e89eb16ed..f33ede77f 100644 --- a/modules/sqs-queue/versions.tf +++ b/modules/sqs-queue/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } } } diff --git a/modules/ssm-parameters/README.md b/modules/ssm-parameters/README.md index 6126a3d32..3c5c374bf 100644 --- a/modules/ssm-parameters/README.md +++ b/modules/ssm-parameters/README.md @@ -1,6 +1,14 @@ +--- +tags: + - component/ssm-parameters + - layer/addons + - provider/aws +--- + # Component: `ssm-parameters` -This component is responsible for provisioning Parameter Store resources against AWS SSM. It supports normal parameter store resources that can be configured directly in YAML OR pulling secret values from a local Sops file. +This component is responsible for provisioning Parameter Store resources against AWS SSM. It supports normal parameter +store resources that can be configured directly in YAML OR pulling secret values from a local Sops file. ## Usage @@ -25,21 +33,22 @@ components: type: String ``` + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.2.5 | +| [terraform](#requirement\_terraform) | >= 1.3.0 | | [aws](#requirement\_aws) | >= 4.0 | -| [sops](#requirement\_sops) | >= 0.5 | +| [sops](#requirement\_sops) | >= 0.5, < 1.0 | ## Providers | Name | Version | |------|---------| | [aws](#provider\_aws) | >= 4.0 | -| [sops](#provider\_sops) | >= 0.5 | +| [sops](#provider\_sops) | >= 0.5, < 1.0 | ## Modules @@ -67,8 +76,6 @@ components: | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.<br>
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [kms\_arn](#input\_kms\_arn) | The ARN of a KMS key used to encrypt and decrypt SecretString values | `string` | `""` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | @@ -76,11 +83,11 @@ components: | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | -| [params](#input\_params) | n/a |
map(object({
value = string
description = string
overwrite = bool
type = string
}))
| n/a | yes | +| [params](#input\_params) | A map of parameter values to write to SSM Parameter Store |
map(object({
value = string
description = string
overwrite = optional(bool, false)
tier = optional(string, "Standard")
type = string
}))
| n/a | yes | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | -| [sops\_source\_file](#input\_sops\_source\_file) | The relative path to the SOPS file which is consumed as the source for creating parameter resources. | `string` | n/a | yes | -| [sops\_source\_key](#input\_sops\_source\_key) | The SOPS key to pull from the source file. | `string` | n/a | yes | +| [sops\_source\_file](#input\_sops\_source\_file) | The relative path to the SOPS file which is consumed as the source for creating parameter resources. | `string` | `""` | no | +| [sops\_source\_key](#input\_sops\_source\_key) | The SOPS key to pull from the source file. | `string` | `""` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | @@ -91,10 +98,11 @@ components: |------|-------------| | [created\_params](#output\_created\_params) | The keys of created SSM parameter store resources. | - + ## References - * [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/ssm-parameters) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/ssm-parameters) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/ssm-parameters/main.tf b/modules/ssm-parameters/main.tf index 2e42a052f..989a3c7a5 100644 --- a/modules/ssm-parameters/main.tf +++ b/modules/ssm-parameters/main.tf @@ -1,6 +1,9 @@ locals { - sops_yaml = yamldecode(data.sops_file.source.raw) - secret_params = nonsensitive(local.sops_yaml[var.sops_source_key]) + enabled = module.this.enabled + sops_enabled = local.enabled && length(var.sops_source_file) > 0 + + sops_yaml = local.sops_enabled ? yamldecode(data.sops_file.source[0].raw) : "" + secret_params = local.sops_enabled ? nonsensitive(local.sops_yaml[var.sops_source_key]) : {} secret_params_normalized = { for key, value in local.secret_params : @@ -13,11 +16,12 @@ locals { } } - params = var.enabled ? merge(var.params, local.secret_params_normalized) : {} + params = local.enabled ? merge(var.params, local.secret_params_normalized) : {} param_keys = keys(local.params) } data "sops_file" "source" { + count = local.sops_enabled ? 1 : 0 source_file = "${path.root}/${var.sops_source_file}" } @@ -26,6 +30,7 @@ resource "aws_ssm_parameter" "destination" { name = each.key description = each.value.description + tier = each.value.tier type = each.value.type key_id = var.kms_arn value = each.value.value diff --git a/modules/ssm-parameters/providers.tf b/modules/ssm-parameters/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/ssm-parameters/providers.tf +++ b/modules/ssm-parameters/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/ssm-parameters/variables.tf b/modules/ssm-parameters/variables.tf index f0382978f..e956f8a50 100644 --- a/modules/ssm-parameters/variables.tf +++ b/modules/ssm-parameters/variables.tf @@ -6,24 +6,28 @@ variable "region" { variable "sops_source_file" { type = string description = "The relative path to the SOPS file which is consumed as the source for creating parameter resources." + default = "" } variable "sops_source_key" { type = string description = "The SOPS key to pull from the source file." + default = "" } variable "kms_arn" { type = string - default = "" description = "The ARN of a KMS key used to encrypt and decrypt SecretString values" + default = "" } variable "params" { type = map(object({ value = string description = string - overwrite = bool + overwrite = optional(bool, false) + tier = optional(string, "Standard") type = string })) + description = "A map of parameter values to write to SSM Parameter Store" } diff --git a/modules/ssm-parameters/versions.tf b/modules/ssm-parameters/versions.tf index 6cd6a3381..300f9596d 100644 --- a/modules/ssm-parameters/versions.tf +++ b/modules/ssm-parameters/versions.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.2.5" + required_version = ">= 1.3.0" required_providers { aws = { diff --git a/modules/sso-saml-provider/README.md b/modules/sso-saml-provider/README.md new file mode 100644 index 000000000..cf3b076a7 --- /dev/null +++ b/modules/sso-saml-provider/README.md @@ -0,0 +1,28 @@ +--- +tags: + - component/sso-saml-provider + - layer/software-delivery + - provider/aws +--- + +# Component: `sso-saml-provider` + +This component reads sso credentials from SSM Parameter store and provides them as outputs + +## Usage + +**Stack Level**: Regional + +Use this in the catalog or use these variables to overwrite the catalog values. + +```yaml +components: + terraform: + sso-saml-provider: + settings: + spacelift: + workspace_enabled: true + vars: + enabled: true + ssm_path_prefix: "/sso/saml/google" +``` diff --git a/modules/sso-saml-provider/context.tf b/modules/sso-saml-provider/context.tf new file mode 100644 index 000000000..5e0ef8856 --- /dev/null +++ b/modules/sso-saml-provider/context.tf @@ -0,0 +1,279 @@ +# +# ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label +# All other instances of this file should be a copy of that one +# +# +# Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf +# and then place it in your Terraform module to automatically get +# Cloud Posse's standard configuration inputs suitable for passing +# to Cloud Posse modules. 
+# +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# +# Modules should access the whole context as `module.this.context` +# to get the input variables with nulls for defaults, +# for example `context = module.this.context`, +# and access individual variables as `module.this.`, +# with final values filled in. +# +# For example, when using defaults, `module.this.context.delimiter` +# will be null, and `module.this.delimiter` will be `-` (hyphen). +# + +module "this" { + source = "cloudposse/label/null" + version = "0.25.0" # requires Terraform >= 0.13.0 + + enabled = var.enabled + namespace = var.namespace + tenant = var.tenant + environment = var.environment + stage = var.stage + name = var.name + delimiter = var.delimiter + attributes = var.attributes + tags = var.tags + additional_tag_map = var.additional_tag_map + label_order = var.label_order + regex_replace_chars = var.regex_replace_chars + id_length_limit = var.id_length_limit + label_key_case = var.label_key_case + label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags + + context = var.context +} + +# Copy contents of cloudposse/terraform-null-label/variables.tf here + +variable "context" { + type = any + default = { + enabled = true + namespace = null + tenant = null + environment = null + stage = null + name = null + delimiter = null + attributes = [] + tags = {} + additional_tag_map = {} + regex_replace_chars = null + label_order = [] + id_length_limit = null + label_key_case = null + label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] + } + description = <<-EOT + Single object for setting entire context at once. + See description of individual variables for details. + Leave string and numeric variables as `null` to use default value. + Individual variable settings (non-null) override settings in context object, + except for attributes, tags, and additional_tag_map, which are merged. + EOT + + validation { + condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`." + } + + validation { + condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"]) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "enabled" { + type = bool + default = null + description = "Set to false to prevent the module from creating any resources" +} + +variable "namespace" { + type = string + default = null + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. 
A customer identifier, indicating who this instance of a resource is for" +} + +variable "environment" { + type = string + default = null + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" +} + +variable "stage" { + type = string + default = null + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" +} + +variable "name" { + type = string + default = null + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT +} + +variable "delimiter" { + type = string + default = null + description = <<-EOT + Delimiter to be used between ID elements. + Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. + EOT +} + +variable "attributes" { + type = list(string) + default = [] + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT +} + +variable "tags" { + type = map(string) + default = {} + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT +} + +variable "additional_tag_map" { + type = map(string) + default = {} + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT +} + +variable "label_order" { + type = list(string) + default = null + description = <<-EOT + The order in which the labels (ID elements) appear in the `id`. + Defaults to ["namespace", "environment", "stage", "name", "attributes"]. + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. + EOT +} + +variable "regex_replace_chars" { + type = string + default = null + description = <<-EOT + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. + If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. + EOT +} + +variable "id_length_limit" { + type = number + default = null + description = <<-EOT + Limit `id` to this many characters (minimum 6). + Set to `0` for unlimited length. + Set to `null` for keep the existing setting, which defaults to `0`. + Does not affect `id_full`. + EOT + validation { + condition = var.id_length_limit == null ? 
true : var.id_length_limit >= 6 || var.id_length_limit == 0 + error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length." + } +} + +variable "label_key_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper`. + Default value: `title`. + EOT + + validation { + condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case) + error_message = "Allowed values: `lower`, `title`, `upper`." + } +} + +variable "label_value_case" { + type = string + default = null + description = <<-EOT + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. + Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. + Default value: `lower`. + EOT + + validation { + condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case) + error_message = "Allowed values: `lower`, `title`, `upper`, `none`." + } +} + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). + EOT +} + +#### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/sso-saml-provider/main.tf b/modules/sso-saml-provider/main.tf new file mode 100644 index 000000000..01167147f --- /dev/null +++ b/modules/sso-saml-provider/main.tf @@ -0,0 +1,16 @@ +locals { + enabled = module.this.enabled + + url = try(module.store_read.map[format("%s/%s", var.ssm_path_prefix, "url")], "") + ca = try(module.store_read.map[format("%s/%s", var.ssm_path_prefix, "ca")], "") + issuer = try(module.store_read.map[format("%s/%s", var.ssm_path_prefix, "issuer")], "") +} + +module "store_read" { + source = "cloudposse/ssm-parameter-store/aws" + version = "0.10.0" + + context = module.this.context + + parameter_read = formatlist("%s/%s", var.ssm_path_prefix, ["url", "ca", "issuer"]) +} diff --git a/modules/sso-saml-provider/outputs.tf b/modules/sso-saml-provider/outputs.tf new file mode 100644 index 000000000..b855bfdb3 --- /dev/null +++ b/modules/sso-saml-provider/outputs.tf @@ -0,0 +1,32 @@ +output "url" { + value = local.enabled ? local.url : null + description = "Identity Provider Single Sign-On URL" + sensitive = true +} + +output "ca" { + value = local.enabled ? local.ca : null + description = "Raw signing certificate" + sensitive = true +} + +output "issuer" { + value = local.enabled ? 
local.issuer : null + description = "Identity Provider Single Sign-On Issuer URL" + sensitive = true +} + +output "usernameAttr" { + value = local.enabled ? var.usernameAttr : null + description = "User name attribute" +} + +output "emailAttr" { + value = local.enabled ? var.emailAttr : null + description = "Email attribute" +} + +output "groupsAttr" { + value = local.enabled ? var.groupsAttr : null + description = "Groups attribute" +} diff --git a/modules/sso-saml-provider/providers.tf b/modules/sso-saml-provider/providers.tf new file mode 100644 index 000000000..ef923e10a --- /dev/null +++ b/modules/sso-saml-provider/providers.tf @@ -0,0 +1,19 @@ +provider "aws" { + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles" { + source = "../account-map/modules/iam-roles" + context = module.this.context +} diff --git a/modules/sso-saml-provider/variables.tf b/modules/sso-saml-provider/variables.tf new file mode 100644 index 000000000..bc5f6d3cd --- /dev/null +++ b/modules/sso-saml-provider/variables.tf @@ -0,0 +1,27 @@ +variable "region" { + type = string + description = "AWS Region" +} + +variable "ssm_path_prefix" { + type = string + description = "Top level SSM path prefix (without leading or trailing slash)" +} + +variable "usernameAttr" { + type = string + description = "User name attribute" + default = null +} + +variable "emailAttr" { + type = string + description = "Email attribute" + default = null +} + +variable "groupsAttr" { + type = string + description = "Group attribute" + default = null +} diff --git a/modules/sso-saml-provider/versions.tf b/modules/sso-saml-provider/versions.tf new file mode 100644 index 000000000..f33ede77f --- /dev/null +++ b/modules/sso-saml-provider/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.0" + } + } +} diff --git a/modules/sso/default.auto.tfvars b/modules/sso/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/sso/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/strongdm/README.md b/modules/strongdm/README.md index dc234635e..91f0a6941 100644 --- a/modules/strongdm/README.md +++ b/modules/strongdm/README.md @@ -1,6 +1,13 @@ +--- +tags: + - component/strongdm + - layer/unassigned + - provider/aws +--- + # Component: `strongdm` -This component provisions [strongDM](https://www.strongdm.com/) gateway, relay and roles +This component provisions [strongDM](https://www.strongdm.com/) gateway, relay and roles ## Usage @@ -16,6 +23,7 @@ components: enabled: true ``` + ## Requirements @@ -69,12 +77,11 @@ components: | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {}
}
| no | | [create\_roles](#input\_create\_roles) | Set `true` to create roles (should only be set in one account) | `bool` | `false` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between `namespace`, `environment`, `stage`, `name` and `attributes`.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | -| [dns\_zone](#input\_dns\_zone) | n/a | `string` | `null` | no | +| [dns\_zone](#input\_dns\_zone) | DNS zone (e.g. example.com) into which to install the web host. | `string` | `null` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | Environment, e.g. 'uw2', 'us-west-2', OR 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [gateway\_count](#input\_gateway\_count) | Number of gateways to provision | `number` | `2` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for the default, which is `0`.<br>
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | | [install\_gateway](#input\_install\_gateway) | Set `true` to install a pair of gateways | `bool` | `false` | no | | [install\_relay](#input\_install\_relay) | Set `true` to install a pair of relays | `bool` | `true` | no | | [kms\_alias\_name](#input\_kms\_alias\_name) | AWS KMS alias used for encryption/decryption default is alias used in SSM | `string` | `"alias/aws/ssm"` | no | @@ -97,7 +104,9 @@ components: No outputs. + ## References -* https://github.com/spotinst/spotinst-kubernetes-helm-charts -* https://docs.spot.io/ocean/tutorials/spot-kubernetes-controller/ + +- https://github.com/spotinst/spotinst-kubernetes-helm-charts +- https://docs.spot.io/ocean/tutorials/spot-kubernetes-controller/ diff --git a/modules/strongdm/charts/strongdm/templates/_helpers.tpl b/modules/strongdm/charts/strongdm/templates/_helpers.tpl index e5f798fa4..efd52cd75 100644 --- a/modules/strongdm/charts/strongdm/templates/_helpers.tpl +++ b/modules/strongdm/charts/strongdm/templates/_helpers.tpl @@ -81,4 +81,3 @@ Create the name of the controller service account to use {{ default "default" .Values.serviceAccount.name }} {{- end -}} {{- end -}} - diff --git a/modules/strongdm/provider-strongdm.tf b/modules/strongdm/provider-strongdm.tf new file mode 100644 index 000000000..c704f951c --- /dev/null +++ b/modules/strongdm/provider-strongdm.tf @@ -0,0 +1,26 @@ +provider "aws" { + alias = "api_keys" + region = var.ssm_region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles_network.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles_network.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles_network.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "iam_roles_network" { + source = "../account-map/modules/iam-roles" + stage = var.ssm_account + context = module.this.context +} + +provider "sdm" { + api_access_key = local.enabled ? data.aws_ssm_parameter.api_access_key[0].value : null + api_secret_key = local.enabled ? data.aws_ssm_parameter.api_secret_key[0].value : null +} diff --git a/modules/strongdm/providers.tf b/modules/strongdm/providers.tf old mode 100755 new mode 100644 index c05628072..ef923e10a --- a/modules/strongdm/providers.tf +++ b/modules/strongdm/providers.tf @@ -1,34 +1,19 @@ provider "aws" { region = var.region - profile = coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) -} + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name -module "iam_roles" { - source = "../account-map/modules/iam-roles" - context = module.this.context + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } } -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -provider "aws" { - alias = "api_keys" - region = var.ssm_region - - profile = coalesce(var.import_profile_name, module.iam_roles_network.terraform_profile_name) -} - -module "iam_roles_network" { +module "iam_roles" { source = "../account-map/modules/iam-roles" - stage = var.ssm_account context = module.this.context } - -provider "sdm" { - api_access_key = local.enabled ? data.aws_ssm_parameter.api_access_key[0].value : null - api_secret_key = local.enabled ? data.aws_ssm_parameter.api_secret_key[0].value : null -} diff --git a/modules/strongdm/variables.tf b/modules/strongdm/variables.tf index 4abcddc63..1057c5886 100644 --- a/modules/strongdm/variables.tf +++ b/modules/strongdm/variables.tf @@ -53,7 +53,7 @@ variable "kubernetes_namespace" { variable "dns_zone" { type = string - description = "" + description = "DNS zone (e.g. example.com) into which to install the web host." default = null } diff --git a/modules/tfstate-backend/README.md b/modules/tfstate-backend/README.md index 090eea7ed..cdd0e2a5a 100644 --- a/modules/tfstate-backend/README.md +++ b/modules/tfstate-backend/README.md @@ -1,65 +1,133 @@ +--- +tags: + - component/tfstate-backend + - layer/foundation + - provider/aws + - privileged +--- + # Component: `tfstate-backend` -This component is responsible for provisioning an S3 Bucket and DynamoDB table that follow security best practices for usage as a Terraform backend. It also creates IAM roles for access to the Terraform backend. +This component is responsible for provisioning an S3 Bucket and DynamoDB table that follow security best practices for +usage as a Terraform backend. It also creates IAM roles for access to the Terraform backend. -Once the initial S3 backend is configured, this component can create additional backends, allowing you to segregate them and control access to each backend separately. This may be desirable because any secret or sensitive information (such as generated passwords) that Terraform has access to gets stored in the Terraform state backend S3 bucket, so you may wish to restrict who can read the production Terraform state backend S3 bucket. -However, perhaps counter-intuitively, all Terraform users require read access to the most sensitive accounts, such as `root` and `audit`, in order to read security configuration information, so careful planning is required when architecting backend splits. +Once the initial S3 backend is configured, this component can create additional backends, allowing you to segregate them +and control access to each backend separately. This may be desirable because any secret or sensitive information (such +as generated passwords) that Terraform has access to gets stored in the Terraform state backend S3 bucket, so you may +wish to restrict who can read the production Terraform state backend S3 bucket. However, perhaps counter-intuitively, +all Terraform users require read access to the most sensitive accounts, such as `root` and `audit`, in order to read +security configuration information, so careful planning is required when architecting backend splits. +## Prerequisites -### Access Control +> [!TIP] +> +> Part of cold start, so it has to initially be run with `SuperAdmin`, multiple times: to create the S3 bucket and then +> to move the state into it. 
Follow the guide +> **[here](https://docs.cloudposse.com/layers/accounts/tutorials/manual-configuration/#provision-tfstate-backend-component)** +> to get started. -For each backend, this module will create an IAM role with read/write access and, optionally, an IAM role with read-only access. You can configure who is allowed to assume these roles. -- While read/write access is required for `terraform apply`, the created role only grants read/write access to the Terraform state, it does not grant permission to create/modify/destroy AWS resources. -- Similarly, while the read-only role prohibits making changes to the Terraform state, it does not prevent anyone from making changes to AWS resources using a different role. -- Many Cloud Posse components store information about resources they create in the Terraform state via their outputs, and many other components read this information from the Terraform state backend via the CloudPosse `remote-state` module and use it as part of their configuration. For example, the `account-map` component exists solely for the purpose of organizing information about the created AWS accounts and storing it in its Terraform state, making it available via `remote-state`. This means that you if you are going to restrict access to some backends, you need to carefully orchestrate what is stored there and ensure that you are not storing information a component needs in a backend it will not have access to. Typically, information in the most sensitive accounts, such as `root`, `audit`, and `security`, is nevertheless needed by every account, for example to know where to send audit logs, so it is not obvious and can be counter-intuitive which accounts need access to which backends. Plan carefully. -- Atmos provides separate configuration for Terraform state access via the `backend` and `remote_state_backend` settings. Always configure the `backend` setting with a role that has read/write access (and override that setting to be `null` for components deployed by SuperAdmin). If a read-only role is available (and we recommend you create one via this module), use that role in `remote_state_backend.s3.role_arn`. -- Note that the "read-only" in the "read-only role" refers solely to the S3 bucket that stores the backend data. That role still has read/write access to the DynamoDB table, which is desirable so that users restricted to the read-only role can still perform drift detection by running `terraform plan`. The DynamoDB table only stores checksums and mutual-exclusion lock information, so it is not considered sensitive. The worst a malicious user could do would be to corrupt the table and cause a denial-of-service (DoS) for Terraform, but such DoS would only affect making changes to the infrastructure, it would not affect the operation of the existing infrastructure, so it is an ineffective and therefore unlikely vector of attack. (Also note that the entire DynamoDB table is optional and can be deleted entirely; Terraform will repopulate it as new activity takes place.) +- This component assumes you are using the `aws-teams` and `aws-team-roles` components. +- Before the `account` and `account-map` components are deployed for the first time, you'll want to run this component + with `access_roles_enabled` set to `false` to prevent errors due to missing IAM Role ARNs. This will enable only + enough access to the Terraform state for you to finish provisioning accounts and roles. 
After those components have + been deployed, you will want to run this component again with `access_roles_enabled` set to `true` to provide the + complete access as configured in the stacks. +### Access Control +For each backend, this module will create an IAM role with read/write access and, optionally, an IAM role with read-only +access. You can configure who is allowed to assume these roles. + +- While read/write access is required for `terraform apply`, the created role only grants read/write access to the + Terraform state, it does not grant permission to create/modify/destroy AWS resources. + +- Similarly, while the read-only role prohibits making changes to the Terraform state, it does not prevent anyone from + making changes to AWS resources using a different role. + +- Many Cloud Posse components store information about resources they create in the Terraform state via their outputs, + and many other components read this information from the Terraform state backend via the CloudPosse `remote-state` + module and use it as part of their configuration. For example, the `account-map` component exists solely for the + purpose of organizing information about the created AWS accounts and storing it in its Terraform state, making it + available via `remote-state`. This means that you if you are going to restrict access to some backends, you need to + carefully orchestrate what is stored there and ensure that you are not storing information a component needs in a + backend it will not have access to. Typically, information in the most sensitive accounts, such as `root`, `audit`, + and `security`, is nevertheless needed by every account, for example to know where to send audit logs, so it is not + obvious and can be counter-intuitive which accounts need access to which backends. Plan carefully. + +- Atmos provides separate configuration for Terraform state access via the `backend` and `remote_state_backend` + settings. Always configure the `backend` setting with a role that has read/write access (and override that setting to + be `null` for components deployed by SuperAdmin). If a read-only role is available (only helpful if you have more than + one backend), use that role in `remote_state_backend.s3.role_arn`. Otherwise, use the read/write role in + `remote_state_backend.s3.role_arn`, to ensure that all components can read the Terraform state, even if + `backend.s3.role_arn` is set to `null`, as it is with a few critical components meant to be deployed by SuperAdmin. + +- Note that the "read-only" in the "read-only role" refers solely to the S3 bucket that stores the backend data. That + role still has read/write access to the DynamoDB table, which is desirable so that users restricted to the read-only + role can still perform drift detection by running `terraform plan`. The DynamoDB table only stores checksums and + mutual-exclusion lock information, so it is not considered sensitive. The worst a malicious user could do would be to + corrupt the table and cause a denial-of-service (DoS) for Terraform, but such DoS would only affect making changes to + the infrastructure, it would not affect the operation of the existing infrastructure, so it is an ineffective and + therefore unlikely vector of attack. (Also note that the entire DynamoDB table is optional and can be deleted + entirely; Terraform will repopulate it as new activity takes place.) + +- For convenience, the component automatically grants access to the backend to the user deploying it. 
This is helpful + because it allows that user, presumably SuperAdmin, to deploy the normal components, which assume the user does not have + direct access to Terraform state, without requiring custom configuration. However, you may want to explicitly grant + SuperAdmin access to the backend in the `allowed_principal_arns` configuration, to ensure that SuperAdmin can always + access the backend, even if the component is later updated by the `root-admin` role. + +### Quotas + +When allowing access to both SAML and AWS SSO users, the trust policy for the IAM roles created by this component can +exceed the default 2048 character limit. If you exceed this limit, you can increase it by requesting a quota +increase [here](https://us-east-1.console.aws.amazon.com/servicequotas/home/services/iam/quotas/L-C07B4B0D). Note that +this is the IAM limit on "The maximum number of characters in an IAM role trust policy" and it must be configured in the +`us-east-1` region, regardless of what region you are deploying to. Normally 3072 characters is sufficient, and is the +recommended setting, since it leaves you room to expand the trust policy in the future while you consider how to reduce +its size. ## Usage -**Stack Level**: Regional (because DynamoDB is region-specific), but deploy only in a single region and only in the `root` account -**Deployment**: Must be deployed by SuperAdmin using `atmos` CLI +**Stack Level**: Regional (because DynamoDB is region-specific), but deploy only in a single region and only in the +`root` account + +**Deployment**: Must be deployed by SuperAdmin using `atmos` CLI -This component configures the shared Terraform backend, and as such is the first component that must be deployed, since all other components depend on it. In fact, this component even depends on itself, so special deployment procedures are needed for the initial deployment (documented in the "Cold Start" procedures). +This component configures the shared Terraform backend, and as such is the first component that must be deployed, since +all other components depend on it. In fact, this component even depends on itself, so special deployment procedures are +needed for the initial deployment (documented in the "Cold Start" procedures). Here's an example snippet for how to use this component.
```yaml - terraform: - tfstate-backend: - backend: - s3: - role_arn: null - settings: - spacelift: - workspace_enabled: false - vars: - enable_server_side_encryption: true - enabled: true - force_destroy: false - name: tfstate - prevent_unencrypted_uploads: true - access_roles: - default: &tfstate-access-template - write_enabled: true - allowed_roles: - identity: ["admin", "cicd", "poweruser", "spacelift", "terraform"] - denied_roles: {} - allowed_permission_sets: - identity: ["AdministratorAccess"] - denied_permission_sets: {} - allowed_principal_arns: [] - denied_principal_arns: [] - ro: - <<: *tfstate-access-template - write_enabled: false - allowed_roles: - identity: ["admin", "cicd", "poweruser", "spacelift", "terraform", "reader", "observer", "support"] - +terraform: + tfstate-backend: + backend: + s3: + role_arn: null + settings: + spacelift: + workspace_enabled: false + vars: + enable_server_side_encryption: true + enabled: true + force_destroy: false + name: tfstate + prevent_unencrypted_uploads: true + access_roles: + default: &tfstate-access-template + write_enabled: true + allowed_roles: + core-identity: ["devops", "developers", "managers", "spacelift"] + core-root: ["admin"] + denied_roles: {} + allowed_permission_sets: + core-identity: ["AdministratorAccess"] + denied_permission_sets: {} + allowed_principal_arns: [] + denied_principal_arns: [] ``` + ## Requirements @@ -67,12 +135,14 @@ Here's an example snippet for how to use this component. |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | | [aws](#requirement\_aws) | >= 4.9.0 | +| [awsutils](#requirement\_awsutils) | >= 0.16.0 | ## Providers | Name | Version | |------|---------| | [aws](#provider\_aws) | >= 4.9.0 | +| [awsutils](#provider\_awsutils) | >= 0.16.0 | ## Modules @@ -80,7 +150,7 @@ Here's an example snippet for how to use this component. |------|--------|---------| | [assume\_role](#module\_assume\_role) | ../account-map/modules/team-assume-role-policy | n/a | | [label](#module\_label) | cloudposse/label/null | 0.25.0 | -| [tfstate\_backend](#module\_tfstate\_backend) | cloudposse/tfstate-backend/aws | 0.38.1 | +| [tfstate\_backend](#module\_tfstate\_backend) | cloudposse/tfstate-backend/aws | 1.1.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources @@ -88,20 +158,24 @@ Here's an example snippet for how to use this component. | Name | Type | |------|------| | [aws_iam_role.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_arn.cold_start_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/arn) | data source | +| [aws_iam_policy_document.cold_start_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_policy_document.tfstate](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | +| [awsutils_caller_identity.current](https://registry.terraform.io/providers/cloudposse/awsutils/latest/docs/data-sources/caller_identity) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | [access\_roles](#input\_access\_roles) | Map of access roles to create (key is role name, use "default" for same as component). 
See iam-assume-role-policy module for details. |
map(object({
write_enabled = bool
allowed_roles = map(list(string))
denied_roles = map(list(string))
allowed_principal_arns = list(string)
denied_principal_arns = list(string)
allowed_permission_sets = map(list(string))
denied_permission_sets = map(list(string))
}))
| `{}` | no | -| [access\_roles\_enabled](#input\_access\_roles\_enabled) | Enable creation of access roles. Set false for cold start (before account-map has been created). | `bool` | `true` | no | +| [access\_roles\_enabled](#input\_access\_roles\_enabled) | Enable access roles to be assumed. Set `false` for cold start (before account-map has been created),
because the role to ARN mapping has not yet been created.
Note that the current caller and any `allowed_principal_arns` will always be allowed to assume the role. | `bool` | `true` | no | | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | -| [enable\_point\_in\_time\_recovery](#input\_enable\_point\_in\_time\_recovery) | Enable DynamoDB point-in-time recovery | `bool` | `false` | no | +| [enable\_point\_in\_time\_recovery](#input\_enable\_point\_in\_time\_recovery) | Enable DynamoDB point-in-time recovery | `bool` | `true` | no | | [enable\_server\_side\_encryption](#input\_enable\_server\_side\_encryption) | Enable DynamoDB and S3 server-side encryption | `bool` | `true` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | @@ -132,7 +206,9 @@ Here's an example snippet for how to use this component. | [tfstate\_backend\_s3\_bucket\_domain\_name](#output\_tfstate\_backend\_s3\_bucket\_domain\_name) | Terraform state S3 bucket domain name | | [tfstate\_backend\_s3\_bucket\_id](#output\_tfstate\_backend\_s3\_bucket\_id) | Terraform state S3 bucket ID | - + ## References - * [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/tfstate-backend) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/tfstate-backend) - + Cloud Posse's upstream component diff --git a/modules/tfstate-backend/iam.tf b/modules/tfstate-backend/iam.tf index caaa48a5b..afadaa1d5 100644 --- a/modules/tfstate-backend/iam.tf +++ b/modules/tfstate-backend/iam.tf @@ -1,15 +1,28 @@ locals { - access_roles = local.enabled && var.access_roles_enabled ? { + access_roles = local.enabled ? { for k, v in var.access_roles : ( length(split(module.this.delimiter, k)) > 1 ? k : module.label[k].id ) => v } : {} - access_roles_enabled = module.this.enabled && length(keys(local.access_roles)) > 0 + + access_roles_enabled = local.enabled && var.access_roles_enabled + cold_start_access_enabled = local.enabled && !var.access_roles_enabled + + # Technically, `eks_role_arn` is incorrect, because it strips any path from the ARN, + # but since we do not expect there to be a path in the role ARN (as opposed to perhaps an attached IAM policy), + # it is OK. The advantage of using `eks_role_arn` is that it converts and Assumed Role ARN from STS, like + # arn:aws:sts::123456789012:assumed-role/acme-core-gbl-root-admin/aws-go-sdk-1722029959251053170 + # to the IAM Role ARN, like + # arn:aws:iam::123456789012:role/acme-core-gbl-root-admin + caller_arn = coalesce(data.awsutils_caller_identity.current.eks_role_arn, data.awsutils_caller_identity.current.arn) } +data "awsutils_caller_identity" "current" {} +data "aws_partition" "current" {} + module "label" { - for_each = var.access_roles + for_each = local.enabled ? var.access_roles : {} source = "cloudposse/label/null" version = "0.25.0" # requires Terraform >= 0.13.0 @@ -24,12 +37,14 @@ module "label" { } module "assume_role" { - for_each = local.access_roles + for_each = local.access_roles_enabled ? 
local.access_roles : {} source = "../account-map/modules/team-assume-role-policy" - allowed_roles = each.value.allowed_roles - denied_roles = each.value.denied_roles - allowed_principal_arns = each.value.allowed_principal_arns + allowed_roles = each.value.allowed_roles + denied_roles = each.value.denied_roles + + # Allow whatever user or role is running Terraform to manage the backend to assume any backend access role + allowed_principal_arns = distinct(concat(each.value.allowed_principal_arns, [local.caller_arn])) denied_principal_arns = each.value.denied_principal_arns # Permission sets are for AWS SSO, which is optional allowed_permission_sets = try(each.value.allowed_permission_sets, {}) @@ -68,7 +83,7 @@ resource "aws_iam_role" "default" { name = each.key description = "${each.value.write_enabled ? "Access" : "Read-only access"} role for ${module.this.id}" - assume_role_policy = module.assume_role[each.key].policy_document + assume_role_policy = var.access_roles_enabled ? module.assume_role[each.key].policy_document : data.aws_iam_policy_document.cold_start_assume_role[each.key].json tags = merge(module.this.tags, { Name = each.key }) inline_policy { @@ -77,3 +92,53 @@ resource "aws_iam_role" "default" { } managed_policy_arns = [] } + +locals { + all_cold_start_access_principals = local.cold_start_access_enabled ? toset(concat([local.caller_arn], + flatten([for k, v in local.access_roles : v.allowed_principal_arns]))) : toset([]) + cold_start_access_principal_arns = local.cold_start_access_enabled ? { for k, v in local.access_roles : k => distinct(concat( + [local.caller_arn], v.allowed_principal_arns + )) } : {} + cold_start_access_principals = local.cold_start_access_enabled ? { + for k, v in local.cold_start_access_principal_arns : k => formatlist("arn:%v:iam::%v:root", data.aws_partition.current.partition, distinct([ + for arn in v : data.aws_arn.cold_start_access[arn].account + ])) + } : {} + +} + +data "aws_arn" "cold_start_access" { + for_each = local.all_cold_start_access_principals + arn = each.value +} + +# This is a basic policy that allows the caller and explicitly allowed principals to assume the role +# during the period roles are being set up (cold start). +data "aws_iam_policy_document" "cold_start_assume_role" { + for_each = local.cold_start_access_enabled ? local.access_roles : {} + + statement { + sid = "ColdStartRoleAssumeRole" + + effect = "Allow" + # These actions need to be kept in sync with the actions in the assume_role module + actions = [ + "sts:AssumeRole", + "sts:SetSourceIdentity", + "sts:TagSession", + ] + + condition { + test = "ArnLike" + variable = "aws:PrincipalArn" + values = local.cold_start_access_principal_arns[each.key] + } + + principals { + type = "AWS" + # Principals is a required field, so we allow any principal in any of the accounts, restricted by the assumed Role ARN in the condition clauses. + # This allows us to allow non-existent (yet to be created) roles, which would not be allowed if directly specified in `principals`. 
+ identifiers = local.cold_start_access_principals[each.key] + } + } +} diff --git a/modules/tfstate-backend/main.tf b/modules/tfstate-backend/main.tf index 74b989e96..74f370ef2 100644 --- a/modules/tfstate-backend/main.tf +++ b/modules/tfstate-backend/main.tf @@ -4,12 +4,13 @@ locals { module "tfstate_backend" { source = "cloudposse/tfstate-backend/aws" - version = "0.38.1" + version = "1.1.0" - force_destroy = var.force_destroy - prevent_unencrypted_uploads = var.prevent_unencrypted_uploads - enable_server_side_encryption = var.enable_server_side_encryption - enable_point_in_time_recovery = var.enable_point_in_time_recovery + force_destroy = var.force_destroy + prevent_unencrypted_uploads = var.prevent_unencrypted_uploads + // enable_server_side_encryption = var.enable_server_side_encryption + enable_point_in_time_recovery = var.enable_point_in_time_recovery + bucket_ownership_enforced_enabled = false context = module.this.context } diff --git a/modules/tfstate-backend/variables.tf b/modules/tfstate-backend/variables.tf index 3ee90c1ac..47a25cd97 100644 --- a/modules/tfstate-backend/variables.tf +++ b/modules/tfstate-backend/variables.tf @@ -24,7 +24,7 @@ variable "enable_server_side_encryption" { variable "enable_point_in_time_recovery" { type = bool description = "Enable DynamoDB point-in-time recovery" - default = false + default = true } variable "access_roles" { @@ -43,6 +43,10 @@ variable "access_roles" { variable "access_roles_enabled" { type = bool - description = "Enable creation of access roles. Set false for cold start (before account-map has been created)." + description = <<-EOT + Enable access roles to be assumed. Set `false` for cold start (before account-map has been created), + because the role to ARN mapping has not yet been created. + Note that the current caller and any `allowed_principal_arns` will always be allowed to assume the role. + EOT default = true } diff --git a/modules/tfstate-backend/versions.tf b/modules/tfstate-backend/versions.tf index cc73ffd35..1130c8c07 100644 --- a/modules/tfstate-backend/versions.tf +++ b/modules/tfstate-backend/versions.tf @@ -6,5 +6,9 @@ terraform { source = "hashicorp/aws" version = ">= 4.9.0" } + awsutils = { + source = "cloudposse/awsutils" + version = ">= 0.16.0" + } } } diff --git a/modules/tgw/CHANGELOG.md b/modules/tgw/CHANGELOG.md new file mode 100644 index 000000000..9c2e9a799 --- /dev/null +++ b/modules/tgw/CHANGELOG.md @@ -0,0 +1,64 @@ +## Upgrading to `v1.276.0` + +Components PR [#804](https://github.com/cloudposse/terraform-aws-components/pull/804) + +### Affected Components + +- `tgw/hub` +- `tgw/spoke` +- `tgw/cross-region-hub-connector` + +### Summary + +This change to the Transit Gateway components, +[PR #804](https://github.com/cloudposse/terraform-aws-components/pull/804), added support for cross-region connections. + +As part of that change, we've added `environment` to the component identifier used in the Terraform Output created by +`tgw/hub`. Because of that map key change, all resources in Terraform now have a new resource identifier and therefore +must be recreated with Terraform or removed from state and imported into the new resource ID. + +Recreating the resources is the easiest solution but means that Transit Gateway connectivity will be lost while the +changes apply, which typically takes an hour. Alternatively, removing the resources from state and importing back into +the new resource ID is much more complex operationally but means no lost Transit Gateway connectivity. 
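+
+If you do choose the remove-and-import (or equivalently, `terraform state mv`) route instead, the general shape is one
+state move per affected resource, from the old account-keyed address to the new environment-qualified key. The
+addresses below are a hypothetical sketch only; list the real ones with `terraform state list` in each affected
+`tgw/spoke` workspace.
+
+```bash
+# Sketch only: list the attachment addresses currently in state
+terraform state list | grep transit_gateway
+
+# Hypothetical example: move an attachment from the old map key ("core-network")
+# to the new environment-qualified map key ("core-use2-network")
+terraform state mv \
+  'module.tgw_spoke_vpc_attachment.module.standard_vpc_attachment.aws_ec2_transit_gateway_vpc_attachment.default["core-network"]' \
+  'module.tgw_spoke_vpc_attachment.module.standard_vpc_attachment.aws_ec2_transit_gateway_vpc_attachment.default["core-use2-network"]'
+```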
+ +Since we use Transit Gateway for VPN and GitHub Automation runner access, a temporarily lost connection is not a +significant concern, so we choose to accept lost connectivity and recreate all `tgw/spoke` resources. + +### Steps + +1. Notify your team of a temporary VPN and Automation outage for accessing private networks +2. Deploy all `tgw/hub` components. There should be a hub component in each region of your network account connected to + Transit Gateway +3. Deploy all `tgw/spoke` components. There should be a spoke component in every account and every region connected to + Transit Gateway + +#### Tips + +Use workflows to deploy `tgw` across many accounts with a single command: + +```bash +atmos workflow deploy/tgw -f network +``` + +```yaml +# stacks/workflows/network.yaml +workflows: + deploy/tgw: + description: Provision the Transit Gateway "hub" and "spokes" for connecting VPCs. + steps: + - command: terraform deploy tgw/hub -s core-use1-network + name: hub + - command: terraform deploy tgw/spoke -s core-use1-network + - command: echo 'Creating core spokes for Transit Gateway' + type: shell + name: core-spokes + - command: terraform deploy tgw/spoke -s core-use1-corp + - command: terraform deploy tgw/spoke -s core-use1-auto + - command: terraform deploy tgw/spoke -s plat-use1-sandbox + - command: echo 'Creating platform spokes for Transit Gateway' + type: shell + name: plat-spokes + - command: terraform deploy tgw/spoke -s plat-use1-dev + - command: terraform deploy tgw/spoke -s plat-use1-staging + - command: terraform deploy tgw/spoke -s plat-use1-prod +``` diff --git a/modules/tgw/README.md b/modules/tgw/README.md new file mode 100644 index 000000000..3c979f9e4 --- /dev/null +++ b/modules/tgw/README.md @@ -0,0 +1,424 @@ +--- +tags: + - component/tgw + - layer/network + - provider/aws +--- + +# Component: `tgw` + +AWS Transit Gateway connects your Amazon Virtual Private Clouds (VPCs) and on-premises networks through a central hub. +This connection simplifies your network and puts an end to complex peering relationships. Transit Gateway acts as a +highly scalable cloud router: each new connection is made only once. + +For more on Transit Gateway, see [the AWS documentation](https://aws.amazon.com/transit-gateway/). + +## Requirements + +In order to connect accounts with Transit Gateway, we deploy Transit Gateway to a central account, typically +`core-network`, and then deploy Transit Gateway attachments for each connected account. Each connected account needs a +Transit Gateway attachment for the given account's VPC, either by VPC attachment or by Peering Connection attachment. +Furthermore, each private subnet in each connected VPC needs to explicitly list the CIDRs for all allowed connections. + +## Solution + +First we deploy the Transit Gateway Hub, `tgw/hub`, to a central network account. The component prepares the Transit +Gateway network with the following steps: + +1. Provision Transit Gateway in the network account +2. Collect VPC and EKS component output from every account connected to Transit Gateway +3. Share the Transit Gateway with the Organization using Resource Access Manager (RAM) + +By using the `tgw/hub` component to collect Terraform output from connected accounts, only this single component +requires access to the Terraform state of all connected accounts. + +Next we deploy `tgw/spoke` to the network account and then to every connected account.
This spoke component connects the +given account to the central hub and any listed connection with the following steps: + +1. Create a Transit Gateway VPC attachment in the spoke account. This connects the account's VPC to the shared Transit + Gateway from the hub account. +2. Define all allowed routes for private subnets. Each private subnet in an account's VPC has it's own route table. This + route table needs to explicitly list any allowed connection to another account's VPC CIDR. +3. (Optional) Create an EKS Cluster Security Group rule to allow traffic to the cluster in the given account. + +## Implementation + +1. Deploy `tgw/hub` to the network account. List every allowed connection: + +```yaml +# stacks/catalog/tgw/hub +components: + terraform: + tgw/hub/defaults: + metadata: + type: abstract + component: tgw/hub + vars: + enabled: true + name: tgw-hub + tags: + Team: sre + Service: tgw-hub + + tgw/hub: + metadata: + inherits: + - tgw/hub/defaults + component: tgw/hub + vars: + # These are all connections available for spokes in this region + # Defaults environment to this region + connections: + - account: + tenant: core + stage: network + - account: + tenant: core + stage: auto + eks_component_names: + - eks/cluster + - account: + tenant: plat + stage: sandbox + eks_component_names: [] # No clusters deployed for sandbox + - account: + tenant: plat + stage: dev + eks_component_names: + - eks/cluster + - account: + tenant: plat + stage: staging + eks_component_names: + - eks/cluster + - account: + tenant: plat + stage: prod + eks_component_names: + - eks/cluster +``` + +2. Deploy `tgw/spoke` to network. List every account connected to network (all accounts): + +```yaml +# stacks/catalog/tgw/spoke +components: + terraform: + tgw/spoke-defaults: + metadata: + type: abstract + component: tgw/spoke + vars: + enabled: true + name: tgw-spoke + tgw_hub_tenant_name: core + tgw_hub_stage_name: network # default, added for visibility + tags: + Team: sre + Service: tgw-spoke +``` + +```yaml +# stacks/orgs/acme/core/network/us-east-1/network.yaml +tgw/spoke: + metadata: + inherits: + - tgw/spoke-defaults + vars: + # This is what THIS spoke is allowed to connect to + connections: + - account: + tenant: core + stage: network + - account: + tenant: core + stage: auto + - account: + tenant: plat + stage: sandbox + - account: + tenant: plat + stage: dev + - account: + tenant: plat + stage: staging + - account: + tenant: plat + stage: prod +``` + +3. Finally, deploy `tgw/spoke` for each connected account and list the allowed connections: + +```yaml +# stacks/orgs/acme/plat/dev/us-east-1/network.yaml +tgw/spoke: + metadata: + inherits: + - tgw/spoke-defaults + vars: + connections: + # Always list self + - account: + tenant: plat + stage: dev + - account: + tenant: core + stage: network + - account: + tenant: core + stage: auto +``` + +### Alternate Regions + +In order to connect any account to the network, the given account needs: + +1. Access to the shared Transit Gateway hub +2. An attachment for the given Transit Gateway hub +3. Routes to and from each private subnet + +However, sharing the Transit Gateway hub via RAM is only supported in the same region as the primary hub. Therefore, we +must instead deploy a new hub in the alternate region and create a +[Transit Gateway Peering Connection](https://docs.aws.amazon.com/vpc/latest/tgw/tgw-peering.html) between the two +Transit Gateway hubs. 
+ +Furthermore, since this Transit Gateway hub for the alternate region is now peered, we must create a Peering Transit +Gateway attachment, opposed to a VPC Transit Gateway Attachment. + +#### Cross Region Deployment + +1. Deploy `tgw/hub` and `tgw/spoke` into the primary region as described in [Implementation](#implementation) + +2. Deploy `tgw/hub` and `tgw/cross-region-hub` into the new region in the network account. See the following + configuration: + +```yaml +# stacks/catalog/tgw/cross-region-hub +import: + - catalog/tgw/hub + +components: + terraform: + # Cross region TGW requires additional hub in the alternate region + tgw/hub: + vars: + # These are all connections available for spokes in this region + # Defaults environment to this region + connections: + # Hub for this region is always required + - account: + tenant: core + stage: network + # VPN source + - account: + tenant: core + stage: network + environment: use1 + # Github Runners + - account: + tenant: core + stage: auto + environment: use1 + eks_component_names: + - eks/cluster + # All stacks where a spoke will be deployed + - account: + tenant: plat + stage: dev + - account: + tenant: plat + stage: staging + - account: + tenant: plat + stage: prod + + # This alternate hub needs to be connected to the primary region's hub + tgw/cross-region-hub-connector: + vars: + enabled: true + primary_tgw_hub_region: us-east-1 +``` + +3. Deploy a `tgw/spoke` for network in the new region. For example: + +```yaml +# stacks/orgs/acme/core/network/us-west-2/network.yaml +tgw/spoke: + metadata: + inherits: + - tgw/spoke-defaults + vars: + peered_region: true # Required for alternate region spokes + connections: + # This stack, always included + - account: + tenant: core + stage: network + # VPN + - account: + tenant: core + environment: use1 + stage: network + # Automation runners + - account: + tenant: core + environment: use1 + stage: auto + eks_component_names: + - eks/cluster + # All other connections + - account: + tenant: plat + stage: dev + - account: + tenant: plat + stage: staging + - account: + tenant: plat + stage: prod +``` + +4. Deploy the `tgw/spoke` components for all connected accounts. For example: + +```yaml +# stacks/orgs/acme/plat/dev/us-west-2/network.yaml +tgw/spoke: + metadata: + inherits: + - tgw/spoke-defaults + vars: + peered_region: true # Required for alternate region spokes + connections: + # This stack, always included + - account: + tenant: plat + stage: dev + # TGW Hub, always included + - account: + tenant: core + stage: network + # VPN + - account: + tenant: core + environment: use1 + stage: network + # Automation runners + - account: + tenant: core + environment: use1 + stage: auto + eks_component_names: + - eks/cluster +``` + +5. Update any existing `tgw/spoke` connections to allow the new account and region. 
For example: + +```yaml +# stacks/orgs/acme/core/auto/us-east-1/network.yaml +tgw/spoke: + metadata: + inherits: + - tgw/spoke-defaults + vars: + connections: + - account: + tenant: core + stage: network + - account: + tenant: core + stage: corp + - account: + tenant: core + stage: auto + - account: + tenant: plat + stage: sandbox + - account: + tenant: plat + stage: dev + - account: + tenant: plat + stage: staging + - account: + tenant: plat + stage: prod + + # Alternate regions <-------- These are added for alternate region + - account: + tenant: core + stage: network + environment: usw2 + - account: + tenant: plat + stage: dev + environment: usw2 + - account: + tenant: plat + stage: staging + environment: usw2 + - account: + tenant: plat + stage: prod + environment: usw2 +``` + +## Destruction + +When destroying Transit Gateway components, order of operations matters. Always destroy any removed `tgw/spoke` +components before removing a connection from the `tgw/hub` component. + +The `tgw/hub` component creates map of VPC resources that each `tgw/spoke` component references. If the required +reference is removed before the `tgw/spoke` is destroyed, Terraform will fail to destroy the given `tgw/spoke` +component. + +:::info Pro Tip! + +[Atmos Workflows](https://atmos.tools/core-concepts/workflows/) make applying and destroying Transit Gateway much +easier! For example, to destroy components in the correct order, use a workflow similar to the following: + +```yaml +# stacks/workflows/network.yaml +workflows: + destroy/tgw: + description: Destroy the Transit Gateway "hub" and "spokes" for connecting VPCs. + steps: + - command: echo 'Destroying platform spokes for Transit Gateway' + type: shell + name: plat-spokes + - command: terraform destroy tgw/spoke -s plat-use1-sandbox --auto-approve + - command: terraform destroy tgw/spoke -s plat-use1-dev --auto-approve + - command: terraform destroy tgw/spoke -s plat-use1-staging --auto-approve + - command: terraform destroy tgw/spoke -s plat-use1-prod --auto-approve + - command: echo 'Destroying core spokes for Transit Gateway' + type: shell + name: core-spokes + - command: terraform destroy tgw/spoke -s core-use1-auto --auto-approve + - command: terraform destroy tgw/spoke -s core-use1-network --auto-approve + - command: echo 'Destroying Transit Gateway Hub' + type: shell + name: hub + - command: terraform destroy tgw/hub -s core-use1-network --auto-approve +``` + +::: + +# FAQ + +## `tgw/spoke` Fails to Recreate VPC Attachment with `DuplicateTransitGatewayAttachment` Error + +```bash +β•· +β”‚ Error: creating EC2 Transit Gateway VPC Attachment: DuplicateTransitGatewayAttachment: tgw-0xxxxxxxxxxxxxxxx has non-deleted Transit Gateway Attachments with same VPC ID. +β”‚ status code: 400, request id: aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee +β”‚ +β”‚ with module.tgw_spoke_vpc_attachment.module.standard_vpc_attachment.aws_ec2_transit_gateway_vpc_attachment.default["core-use2-network"], +β”‚ on .terraform/modules/tgw_spoke_vpc_attachment.standard_vpc_attachment/main.tf line 43, in resource "aws_ec2_transit_gateway_vpc_attachment" "default": +β”‚ 43: resource "aws_ec2_transit_gateway_vpc_attachment" "default" { +β”‚ +β•΅ +Releasing state lock. This may take a few moments... +exit status 1 +``` + +This is caused by Terraform attempting to create the replacement VPC attachment before the original is completely +destroyed. Retry the apply. Now you should see only "create" actions. 
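+
+As a minimal illustration of the retry (the component and stack name here are illustrative; use whichever spoke
+reported the error), simply re-run the apply once the original attachment has finished deleting:
+
+```bash
+# Re-running the same apply should now show only "create" actions
+atmos terraform apply tgw/spoke -s core-use2-network
+```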
diff --git a/modules/tgw/cross-region-hub-connector/README.md b/modules/tgw/cross-region-hub-connector/README.md index 9f8b7e84c..8d2d7af97 100644 --- a/modules/tgw/cross-region-hub-connector/README.md +++ b/modules/tgw/cross-region-hub-connector/README.md @@ -1,99 +1,136 @@ -# Component: `cross-region-hub-connector` +--- +tags: + - component/tgw/cross-region-hub-connector + - layer/network + - provider/aws +--- -This component is responsible for provisioning an [AWS Transit Gateway Peering Connection](https://aws.amazon.com/transit-gateway) to connect TGWs from different accounts and(or) regions. +# Component: `tgw/cross-region-hub-connector` + +This component is responsible for provisioning an +[AWS Transit Gateway Peering Connection](https://aws.amazon.com/transit-gateway) to connect TGWs from different accounts +and(or) regions. + +Transit Gateway does not support sharing the Transit Gateway hub across regions. You must deploy a Transit Gateway hub +for each region and connect the alternate hub to the primary hub. ## Usage **Stack Level**: Regional -This component is deployed to each off-region tgw/hub. -meaning if your home region is `region-a`, and you just created a `tgw/hub` in `region-a` and `region-b`. To peer them, deploy this -to `region-b` +This component is deployed to each alternate region with `tgw/hub`. -This can be done by setting up a catalog to point to the main region, and simply importing it. +For example if your primary region is `us-east-1` and your alternate region is `us-west-2`, deploy another `tgw/hub` in +`us-west-2` and peer the two with `tgw/cross-region-hub-connector` with the following stack config, imported into +`us-west-2` ```yaml +import: + - catalog/tgw/hub + components: terraform: + # Cross region TGW requires additional hub in the alternate region + tgw/hub: + vars: + # These are all connections available for spokes in this region + # Defaults environment to this region + connections: + # Hub for this region is always required + - account: + tenant: core + stage: network + # VPN source + - account: + tenant: core + stage: network + environment: use1 + # Github Runners + - account: + tenant: core + stage: auto + environment: use1 + eks_component_names: + - eks/cluster + # All stacks where a spoke will be deployed + - account: + tenant: plat + stage: dev + eks_component_names: [] # Add clusters here once deployed + + # This alternate hub needs to be connected to the primary region's hub tgw/cross-region-hub-connector: vars: enabled: true - account_map_tenant_name: core - this_region: - tgw_stage_name: network - tgw_tenant_name: core - home_region: - tgw_name_format: "%s-%s" - tgw_stage_name: network - tgw_tenant_name: core - environment: region-a #short or fixed notation - region: region-a + primary_tgw_hub_region: us-east-1 ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [aws](#requirement\_aws) | >= 4.1 | +| [utils](#requirement\_utils) | >= 1.10.0 | ## Providers | Name | Version | |------|---------| -| [aws.tgw\_home\_region](#provider\_aws.tgw\_home\_region) | ~> 4.0 | -| [aws.tgw\_this\_region](#provider\_aws.tgw\_this\_region) | ~> 4.0 | +| [aws](#provider\_aws) | >= 4.1 | +| [aws.primary\_tgw\_hub\_region](#provider\_aws.primary\_tgw\_hub\_region) | >= 4.1 | ## Modules | Name | Source | Version | |------|--------|---------| -| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | -| 
[iam\_role\_tgw\_home\_region](#module\_iam\_role\_tgw\_home\_region) | ../../account-map/modules/iam-roles | n/a | -| [iam\_role\_tgw\_this\_region](#module\_iam\_role\_tgw\_this\_region) | ../../account-map/modules/iam-roles | n/a | +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | -| [tgw\_home\_region](#module\_tgw\_home\_region) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | -| [tgw\_this\_region](#module\_tgw\_this\_region) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [tgw\_hub\_primary\_region](#module\_tgw\_hub\_primary\_region) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [tgw\_hub\_this\_region](#module\_tgw\_hub\_this\_region) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [utils](#module\_utils) | cloudposse/utils/aws | 1.3.0 | ## Resources | Name | Type | |------|------| -| [aws_ec2_transit_gateway_peering_attachment.tgw_peering](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_transit_gateway_peering_attachment) | resource | -| [aws_ec2_transit_gateway_peering_attachment_accepter.tgw_peering_accepter](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_transit_gateway_peering_attachment_accepter) | resource | -| [aws_ec2_transit_gateway_route_table_association.tgw_rt_associate_peering_cross_region](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_transit_gateway_route_table_association) | resource | -| [aws_ec2_transit_gateway_route_table_association.tgw_rt_associate_peering_in_region](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_transit_gateway_route_table_association) | resource | +| [aws_ec2_transit_gateway_peering_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_transit_gateway_peering_attachment) | resource | +| [aws_ec2_transit_gateway_peering_attachment_accepter.primary_region](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_transit_gateway_peering_attachment_accepter) | resource | +| [aws_ec2_transit_gateway_route_table_association.primary_region](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_transit_gateway_route_table_association) | resource | +| [aws_ec2_transit_gateway_route_table_association.this_region](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_transit_gateway_route_table_association) | resource | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [account\_map\_tenant\_name](#input\_account\_map\_tenant\_name) | The name of the tenant where `account_map` is provisioned.

If the `tenant` label is not used, leave this as `null`. | `string` | `null` | no | +| [account\_map\_environment\_name](#input\_account\_map\_environment\_name) | The name of the environment where `account_map` is provisioned | `string` | `"gbl"` | no | +| [account\_map\_stage\_name](#input\_account\_map\_stage\_name) | The name of the stage where `account_map` is provisioned | `string` | `"root"` | no | | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | +| [env\_naming\_convention](#input\_env\_naming\_convention) | The cloudposse/utils naming convention used to translate environment name to AWS region name. Options are `to_short` and `to_fixed` | `string` | `"to_short"` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [home\_region](#input\_home\_region) | Acceptors region config. Describe the transit gateway that should accept the peering |
object({
tgw_name_format = string
tgw_stage_name = string
tgw_tenant_name = string
region = string
environment = string
})
| n/a | yes | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [primary\_tgw\_hub\_region](#input\_primary\_tgw\_hub\_region) | The name of the AWS region where the primary Transit Gateway hub is deployed. This value is used with `var.env_naming_convention` to determine the primary Transit Gateway hub's environment name. | `string` | n/a | yes | +| [primary\_tgw\_hub\_stage](#input\_primary\_tgw\_hub\_stage) | The name of the stage where the primary Transit Gateway hub is deployed. Defaults to `module.this.stage` | `string` | `""` | no | +| [primary\_tgw\_hub\_tenant](#input\_primary\_tgw\_hub\_tenant) | The name of the tenant where the primary Transit Gateway hub is deployed. Only used if tenants are deployed and defaults to `module.this.tenant` | `string` | `""` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | -| [this\_region](#input\_this\_region) | Initiators region config. Describe the transit gateway that should originate the peering |
object({
tgw_stage_name = string
tgw_tenant_name = string
})
| n/a | yes | ## Outputs @@ -101,9 +138,11 @@ components: |------|-------------| | [aws\_ec2\_transit\_gateway\_peering\_attachment\_id](#output\_aws\_ec2\_transit\_gateway\_peering\_attachment\_id) | Transit Gateway Peering Attachment ID | + ## References -- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/tgw/cross-region-hub-connector) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/tgw/cross-region-hub-connector) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/tgw/cross-region-hub-connector/default.auto.tfvars b/modules/tgw/cross-region-hub-connector/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/tgw/cross-region-hub-connector/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/tgw/cross-region-hub-connector/main.tf b/modules/tgw/cross-region-hub-connector/main.tf index 614418980..4a6a0c190 100644 --- a/modules/tgw/cross-region-hub-connector/main.tf +++ b/modules/tgw/cross-region-hub-connector/main.tf @@ -1,40 +1,50 @@ locals { enabled = module.this.enabled + + primary_tgw_hub_tenant = length(var.primary_tgw_hub_tenant) > 0 ? var.primary_tgw_hub_tenant : module.this.tenant + primary_tgw_hub_stage = length(var.primary_tgw_hub_stage) > 0 ? var.primary_tgw_hub_stage : module.this.stage + primary_tgw_hub_account = module.this.tenant != null ? format("%s-%s", local.primary_tgw_hub_tenant, local.primary_tgw_hub_stage) : local.primary_tgw_hub_stage + primary_tgw_hub_account_id = module.account_map.outputs.full_account_map[local.primary_tgw_hub_account] } -# connects two transit gateways that are cross region -resource "aws_ec2_transit_gateway_peering_attachment" "tgw_peering" { - count = local.enabled ? 1 : 0 - provider = aws.tgw_this_region - peer_account_id = module.account_map.outputs.full_account_map[format(var.home_region.tgw_name_format, var.home_region.tgw_tenant_name, var.home_region.tgw_stage_name)] - peer_region = var.home_region.region - peer_transit_gateway_id = module.tgw_home_region.outputs.transit_gateway_id - transit_gateway_id = module.tgw_this_region.outputs.transit_gateway_id - tags = module.this.tags +# Connect two Transit Gateway Hubs across regions +resource "aws_ec2_transit_gateway_peering_attachment" "this" { + count = local.enabled ? 1 : 0 + + peer_account_id = local.primary_tgw_hub_account_id + peer_region = var.primary_tgw_hub_region + peer_transit_gateway_id = module.tgw_hub_primary_region.outputs.transit_gateway_id + transit_gateway_id = module.tgw_hub_this_region.outputs.transit_gateway_id + + tags = module.this.tags } -# accepts the above -resource "aws_ec2_transit_gateway_peering_attachment_accepter" "tgw_peering_accepter" { - count = local.enabled ? 1 : 0 - provider = aws.tgw_home_region - transit_gateway_attachment_id = join("", aws_ec2_transit_gateway_peering_attachment.tgw_peering.*.id) +# Accept the peering attachment in the primary region +resource "aws_ec2_transit_gateway_peering_attachment_accepter" "primary_region" { + count = local.enabled ? 
1 : 0 + + provider = aws.primary_tgw_hub_region + + transit_gateway_attachment_id = join("", aws_ec2_transit_gateway_peering_attachment.this[*].id) tags = module.this.tags } -resource "aws_ec2_transit_gateway_route_table_association" "tgw_rt_associate_peering_in_region" { - count = local.enabled ? 1 : 0 - depends_on = [aws_ec2_transit_gateway_peering_attachment_accepter.tgw_peering_accepter] +resource "aws_ec2_transit_gateway_route_table_association" "this_region" { + count = local.enabled ? 1 : 0 + + transit_gateway_attachment_id = join("", aws_ec2_transit_gateway_peering_attachment.this[*].id) + transit_gateway_route_table_id = module.tgw_hub_this_region.outputs.transit_gateway_route_table_id - provider = aws.tgw_this_region - transit_gateway_attachment_id = join("", aws_ec2_transit_gateway_peering_attachment.tgw_peering.*.id) - transit_gateway_route_table_id = module.tgw_this_region.outputs.transit_gateway_route_table_id + depends_on = [aws_ec2_transit_gateway_peering_attachment_accepter.primary_region] } -resource "aws_ec2_transit_gateway_route_table_association" "tgw_rt_associate_peering_cross_region" { - count = local.enabled ? 1 : 0 - depends_on = [aws_ec2_transit_gateway_peering_attachment_accepter.tgw_peering_accepter] +resource "aws_ec2_transit_gateway_route_table_association" "primary_region" { + count = local.enabled ? 1 : 0 + + provider = aws.primary_tgw_hub_region + + transit_gateway_attachment_id = join("", aws_ec2_transit_gateway_peering_attachment.this[*].id) + transit_gateway_route_table_id = module.tgw_hub_primary_region.outputs.transit_gateway_route_table_id - provider = aws.tgw_home_region - transit_gateway_attachment_id = join("", aws_ec2_transit_gateway_peering_attachment.tgw_peering.*.id) - transit_gateway_route_table_id = module.tgw_home_region.outputs.transit_gateway_route_table_id + depends_on = [aws_ec2_transit_gateway_peering_attachment_accepter.primary_region] } diff --git a/modules/tgw/cross-region-hub-connector/outputs.tf b/modules/tgw/cross-region-hub-connector/outputs.tf index 8658de93d..f8beb7d4d 100644 --- a/modules/tgw/cross-region-hub-connector/outputs.tf +++ b/modules/tgw/cross-region-hub-connector/outputs.tf @@ -1,4 +1,4 @@ output "aws_ec2_transit_gateway_peering_attachment_id" { - value = join("", aws_ec2_transit_gateway_peering_attachment.tgw_peering.*.id) + value = join("", aws_ec2_transit_gateway_peering_attachment.this[*].id) description = "Transit Gateway Peering Attachment ID" } diff --git a/modules/tgw/cross-region-hub-connector/provider-tgw.tf b/modules/tgw/cross-region-hub-connector/provider-tgw.tf new file mode 100644 index 000000000..f69825d39 --- /dev/null +++ b/modules/tgw/cross-region-hub-connector/provider-tgw.tf @@ -0,0 +1,15 @@ +provider "aws" { + alias = "primary_tgw_hub_region" + region = var.primary_tgw_hub_region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} diff --git a/modules/tgw/cross-region-hub-connector/providers.tf b/modules/tgw/cross-region-hub-connector/providers.tf index 1c763a729..89ed50a98 100644 --- a/modules/tgw/cross-region-hub-connector/providers.tf +++ b/modules/tgw/cross-region-hub-connector/providers.tf @@ -1,62 +1,19 @@ -# Assuming region-a is default. 
-# tgw_this_region is network of region-b -# tgw_home_reigon is netowrk of region-a - provider "aws" { - alias = "tgw_this_region" region = var.region - profile = module.iam_role_tgw_this_region.profiles_enabled ? coalesce(var.import_profile_name, module.iam_role_tgw_this_region.terraform_profile_name) : null - dynamic "assume_role" { - for_each = module.iam_role_tgw_this_region.profiles_enabled ? [] : ["role"] - content { - role_arn = coalesce(var.import_role_arn, module.iam_role_tgw_this_region.terraform_role_arn) - } - } -} - -provider "aws" { - alias = "tgw_home_region" - region = var.home_region.region + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name - profile = module.iam_role_tgw_home_region.profiles_enabled ? coalesce(var.import_profile_name, module.iam_role_tgw_home_region.terraform_profile_name) : null dynamic "assume_role" { - for_each = module.iam_role_tgw_home_region.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_role_tgw_home_region.terraform_role_arn) + role_arn = assume_role.value } } } -module "iam_role_tgw_this_region" { +module "iam_roles" { source = "../../account-map/modules/iam-roles" - stage = var.this_region.tgw_stage_name - tenant = var.this_region.tgw_tenant_name context = module.this.context } - -module "iam_role_tgw_home_region" { - source = "../../account-map/modules/iam-roles" - stage = var.home_region.tgw_stage_name - tenant = var.home_region.tgw_tenant_name - environment = var.home_region.environment - context = module.this.context -} - -module "iam_roles" { - source = "../../account-map/modules/iam-roles" - global_tenant_name = var.account_map_tenant_name - context = module.this.context -} - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/tgw/cross-region-hub-connector/remote-state.tf b/modules/tgw/cross-region-hub-connector/remote-state.tf index acfd2f8ab..50ed6b57f 100644 --- a/modules/tgw/cross-region-hub-connector/remote-state.tf +++ b/modules/tgw/cross-region-hub-connector/remote-state.tf @@ -1,31 +1,42 @@ +locals { + primary_tgw_hub_environment = module.utils.region_az_alt_code_maps[var.env_naming_convention][var.primary_tgw_hub_region] +} + +# Used to translate region to environment +module "utils" { + source = "cloudposse/utils/aws" + version = "1.3.0" + enabled = local.enabled +} + module "account_map" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = "account-map" - stage = "root" - environment = "gbl" - tenant = var.account_map_tenant_name - context = module.this.context + environment = var.account_map_environment_name + stage = var.account_map_stage_name + + context = module.this.context } -module "tgw_this_region" { +module "tgw_hub_this_region" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = "tgw/hub" - stage = var.this_region["tgw_stage_name"] - tenant = var.this_region["tgw_tenant_name"] - context = module.this.context + + context = module.this.context } 
-module "tgw_home_region" { +module "tgw_hub_primary_region" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = "tgw/hub" - stage = var.home_region["tgw_stage_name"] - environment = var.home_region["environment"] - tenant = var.home_region["tgw_tenant_name"] - context = module.this.context + stage = local.primary_tgw_hub_stage + environment = local.primary_tgw_hub_environment + tenant = local.primary_tgw_hub_tenant + + context = module.this.context } diff --git a/modules/tgw/cross-region-hub-connector/variables.tf b/modules/tgw/cross-region-hub-connector/variables.tf index 753ad5f74..f24e1a798 100644 --- a/modules/tgw/cross-region-hub-connector/variables.tf +++ b/modules/tgw/cross-region-hub-connector/variables.tf @@ -3,31 +3,42 @@ variable "region" { description = "AWS Region" } -variable "this_region" { - type = object({ - tgw_stage_name = string - tgw_tenant_name = string - }) - description = "Initiators region config. Describe the transit gateway that should originate the peering" +variable "env_naming_convention" { + type = string + description = "The cloudposse/utils naming convention used to translate environment name to AWS region name. Options are `to_short` and `to_fixed`" + default = "to_short" + + validation { + condition = var.env_naming_convention != "to_short" || var.env_naming_convention != "to_fixed:" + error_message = "`var.env_naming_convention` must be either `to_short` or `to_fixed`." + } } -variable "home_region" { - type = object({ - tgw_name_format = string - tgw_stage_name = string - tgw_tenant_name = string - region = string - environment = string - }) - description = "Acceptors region config. Describe the transit gateway that should accept the peering" +variable "primary_tgw_hub_tenant" { + type = string + description = "The name of the tenant where the primary Transit Gateway hub is deployed. Only used if tenants are deployed and defaults to `module.this.tenant`" + default = "" } -variable "account_map_tenant_name" { +variable "primary_tgw_hub_stage" { type = string - description = <<-EOT - The name of the tenant where `account_map` is provisioned. + description = "The name of the stage where the primary Transit Gateway hub is deployed. Defaults to `module.this.stage`" + default = "" +} - If the `tenant` label is not used, leave this as `null`. - EOT - default = null +variable "primary_tgw_hub_region" { + type = string + description = "The name of the AWS region where the primary Transit Gateway hub is deployed. This value is used with `var.env_naming_convention` to determine the primary Transit Gateway hub's environment name." 
+} + +variable "account_map_environment_name" { + type = string + description = "The name of the environment where `account_map` is provisioned" + default = "gbl" +} + +variable "account_map_stage_name" { + type = string + description = "The name of the stage where `account_map` is provisioned" + default = "root" } diff --git a/modules/tgw/cross-region-hub-connector/versions.tf b/modules/tgw/cross-region-hub-connector/versions.tf index e89eb16ed..d2cc89dba 100644 --- a/modules/tgw/cross-region-hub-connector/versions.tf +++ b/modules/tgw/cross-region-hub-connector/versions.tf @@ -4,7 +4,11 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.1" + } + utils = { + source = "cloudposse/utils" + version = ">= 1.10.0" } } } diff --git a/modules/tgw/cross-region-spoke/default.auto.tfvars b/modules/tgw/cross-region-spoke/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/tgw/cross-region-spoke/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/tgw/cross-region-spoke/modules/tgw_routes/outputs.tf b/modules/tgw/cross-region-spoke/modules/tgw_routes/outputs.tf deleted file mode 100644 index 0832bbb24..000000000 --- a/modules/tgw/cross-region-spoke/modules/tgw_routes/outputs.tf +++ /dev/null @@ -1,3 +0,0 @@ -output "aws_ec2_transit_gateway_routes" { - value = aws_ec2_transit_gateway_route.default -} diff --git a/modules/tgw/cross-region-spoke/modules/vpc_routes/outputs.tf b/modules/tgw/cross-region-spoke/modules/vpc_routes/outputs.tf deleted file mode 100644 index 2f97ed8fc..000000000 --- a/modules/tgw/cross-region-spoke/modules/vpc_routes/outputs.tf +++ /dev/null @@ -1,3 +0,0 @@ -output "aws_routes" { - value = aws_route.route -} diff --git a/modules/tgw/cross-region-spoke/outputs.tf b/modules/tgw/cross-region-spoke/outputs.tf deleted file mode 100644 index 605da986c..000000000 --- a/modules/tgw/cross-region-spoke/outputs.tf +++ /dev/null @@ -1,23 +0,0 @@ -output "vpc_routes_this" { - value = module.vpc_routes_this -} - -output "tgw_routes_in_region" { - value = module.tgw_routes_this_region -} - -output "vpc_routes_home" { - value = module.vpc_routes_home -} - -output "tgw_routes_home_region" { - value = module.tgw_routes_home_region -} -# -#output "tgw_this_region" { -# value = module.tgw_this_region -#} -# -#output "vpcs_this_region" { -# value = module.vpcs_this_region -#} diff --git a/modules/tgw/cross-region-spoke/versions.tf b/modules/tgw/cross-region-spoke/versions.tf deleted file mode 100644 index e89eb16ed..000000000 --- a/modules/tgw/cross-region-spoke/versions.tf +++ /dev/null @@ -1,10 +0,0 @@ -terraform { - required_version = ">= 1.0.0" - - required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 4.0" - } - } -} diff --git a/modules/tgw/hub/README.md b/modules/tgw/hub/README.md index 16871d5ae..08a17a0fd 100644 --- a/modules/tgw/hub/README.md +++ b/modules/tgw/hub/README.md @@ -1,6 +1,14 @@ +--- +tags: + - component/tgw/hub + - layer/network + - provider/aws +--- + # Component: `tgw/hub` -This component is responsible for provisioning an [AWS Transit Gateway](https://aws.amazon.com/transit-gateway) `hub` that acts as a centralized gateway for connecting VPCs from other `spoke` accounts. +This component is responsible for provisioning an [AWS Transit Gateway](https://aws.amazon.com/transit-gateway) `hub` +that acts as a centralized gateway for connecting VPCs from other `spoke` accounts. 
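Under the hood, the hub resolves each entry in its `connections` input into remote-state lookups keyed by `<tenant>-<environment>-<stage>-<component>` (the tenant segment is dropped when tenants are not used), and the spoke's `standard_vpc_attachment` submodule rebuilds the same keys to find its peers; see the `remote-state.tf` changes further down in this diff. A hedged sketch of that key expansion with made-up account values:

```hcl
# Sketch only: the connection below is an example, not a real stack configuration.
locals {
  example_connection = {
    account = {
      tenant      = "core"
      environment = "" # empty means "fall back to module.this.environment"
      stage       = "network"
    }
    vpc_component_names = ["vpc", "vpc-dev"]
  }

  # Assume the current environment is "use1" for this example.
  example_environment = length(local.example_connection.account.environment) > 0 ? local.example_connection.account.environment : "use1"

  # Mirrors the key format shared by tgw/hub and tgw/spoke:
  # <tenant>-<environment>-<stage>-<component>, tenant omitted when empty.
  example_vpc_keys = [
    for component in local.example_connection.vpc_component_names :
    length(local.example_connection.account.tenant) > 0 ?
    "${local.example_connection.account.tenant}-${local.example_environment}-${local.example_connection.account.stage}-${component}" :
    "${local.example_environment}-${local.example_connection.account.stage}-${component}"
  ]
  # => ["core-use1-network-vpc", "core-use1-network-vpc-dev"]
}
```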
## Usage @@ -11,28 +19,62 @@ Here's an example snippet for how to configure and use this component: ```yaml components: terraform: - tgw/hub: - settings: - spacelift: - workspace_enabled: true + tgw/hub/defaults: + metadata: + type: abstract + component: tgw/hub vars: enabled: true name: tgw-hub - eks_component_names: - - eks/cluster-blue - accounts_with_vpc: - - core-auto - - core-corp - - core-network - - plat-dev - - plat-staging - - plat-prod - - plat-sandbox - accounts_with_eks: - - plat-dev - - plat-staging - - plat-prod - - plat-sandbox + expose_eks_sg: false + tags: + Team: sre + Service: tgw-hub + + tgw/hub: + metadata: + inherits: + - tgw/hub/defaults + component: tgw/hub + vars: + connections: + - account: + tenant: core + stage: network + vpc_component_names: + - vpc-dev + - account: + tenant: core + stage: artifacts + - account: + tenant: core + stage: auto + eks_component_names: + - eks/cluster + - account: + tenant: plat + stage: dev + vpc_component_names: + - vpc + - vpc/data/1 + eks_component_names: + - eks/cluster + - account: + tenant: plat + stage: staging + vpc_component_names: + - vpc + - vpc/data/1 + eks_component_names: + - eks/cluster + - account: + tenant: plat + stage: prod + vpc_component_names: + - vpc + - vpc/data/1 + eks_component_names: + - eks/cluster ``` To provision the Transit Gateway and all related resources, run the following commands: @@ -42,13 +84,14 @@ atmos terraform plan tgw/hub -s --network atmos terraform apply tgw/hub -s --network ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.1 | +| [aws](#requirement\_aws) | >= 4.1 | ## Providers @@ -58,12 +101,12 @@ No providers. | Name | Source | Version | |------|--------|---------| -| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | -| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [account\_map](#module\_account\_map) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [eks](#module\_eks) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | -| [tgw\_hub](#module\_tgw\_hub) | cloudposse/transit-gateway/aws | 0.9.1 | +| [tgw\_hub](#module\_tgw\_hub) | cloudposse/transit-gateway/aws | 0.11.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -76,26 +119,24 @@ No resources. | [account\_map\_environment\_name](#input\_account\_map\_environment\_name) | The name of the environment where `account_map` is provisioned | `string` | `"gbl"` | no | | [account\_map\_stage\_name](#input\_account\_map\_stage\_name) | The name of the stage where `account_map` is provisioned | `string` | `"root"` | no | | [account\_map\_tenant\_name](#input\_account\_map\_tenant\_name) | The name of the tenant where `account_map` is provisioned.

If the `tenant` label is not used, leave this as `null`. | `string` | `null` | no | -| [accounts\_with\_eks](#input\_accounts\_with\_eks) | Set of account names that have EKS | `set(string)` | n/a | yes | -| [accounts\_with\_vpc](#input\_accounts\_with\_vpc) | Set of account names that have VPC | `set(string)` | n/a | yes | | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [allow\_external\_principals](#input\_allow\_external\_principals) | Set true to allow the TGW to be RAM shared with external principals specified in ram\_principals | `bool` | `false` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [connections](#input\_connections) | A list of objects to define each TGW connection.

By default, each connection will look for only the default `vpc` component. |
list(object({
account = object({
stage = string
environment = optional(string, "")
tenant = optional(string, "")
})
vpc_component_names = optional(list(string), ["vpc"])
eks_component_names = optional(list(string), [])
}))
| `[]` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | -| [eks\_component\_names](#input\_eks\_component\_names) | The names of the eks components | `set(string)` |
[
"eks/cluster"
]
| no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [expose\_eks\_sg](#input\_expose\_eks\_sg) | Set true to allow EKS clusters to accept traffic from source accounts | `bool` | `true` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [ram\_principals](#input\_ram\_principals) | A list of AWS account IDs to share the TGW with outside the organization | `list(string)` | `[]` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | @@ -113,9 +154,11 @@ No resources. | [transit\_gateway\_route\_table\_id](#output\_transit\_gateway\_route\_table\_id) | Transit Gateway route table ID | | [vpcs](#output\_vpcs) | Accounts with VPC and VPCs information | + ## References -- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/tgw/hub) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/tgw/hub) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/tgw/hub/default.auto.tfvars b/modules/tgw/hub/default.auto.tfvars deleted file mode 100644 index 94153c134..000000000 --- a/modules/tgw/hub/default.auto.tfvars +++ /dev/null @@ -1,5 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false - -name = "tgw-hub" diff --git a/modules/tgw/hub/main.tf b/modules/tgw/hub/main.tf index c18ef7711..ff08867e0 100644 --- a/modules/tgw/hub/main.tf +++ b/modules/tgw/hub/main.tf @@ -8,9 +8,11 @@ module "tgw_hub" { source = "cloudposse/transit-gateway/aws" - version = "0.9.1" + version = "0.11.0" ram_resource_share_enabled = true + ram_principals = var.ram_principals + allow_external_principals = var.allow_external_principals route_keys_enabled = true create_transit_gateway = true @@ -30,6 +32,5 @@ locals { vpcs = module.vpc eks = module.eks expose_eks_sg = var.expose_eks_sg - eks_component_names = var.eks_component_names } } diff --git a/modules/tgw/hub/providers.tf b/modules/tgw/hub/providers.tf index c2419aabb..89ed50a98 100644 --- a/modules/tgw/hub/providers.tf +++ b/modules/tgw/hub/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/tgw/hub/remote-state.tf b/modules/tgw/hub/remote-state.tf index d072d3f8d..0b861ee3c 100644 --- a/modules/tgw/hub/remote-state.tf +++ b/modules/tgw/hub/remote-state.tf @@ -1,27 +1,27 @@ locals { - accounts_with_eks = { - for account in var.accounts_with_eks : - account => module.account_map.outputs.account_info_map[account] - } + vpc_connections = flatten([for connection in var.connections : [ + for vpc_component_name in connection.vpc_component_names : { + stage = connection.account.stage + tenant = connection.account.tenant # Defaults to empty string if tenant isn't defined + environment = length(connection.account.environment) > 0 ? connection.account.environment : module.this.environment + component = vpc_component_name + }] + ]) - accounts_with_vpc = { - for account in var.accounts_with_vpc : - account => module.account_map.outputs.account_info_map[account] - } + eks_connections = flatten([for connection in var.connections : [ + for eks_component_name in connection.eks_component_names : { + stage = connection.account.stage + tenant = connection.account.tenant # Defaults to empty string if tenant isn't defined + environment = length(connection.account.environment) > 0 ? connection.account.environment : module.this.environment + component = eks_component_name + }] + ]) - # Create a map of accounts (- or ) and components - eks_remote_states = { - for account_component in setproduct(keys(local.accounts_with_eks), var.eks_component_names) : - join("-", account_component) => { - account = account_component[0] - component = account_component[1] - } - } } module "account_map" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" component = "account-map" environment = var.account_map_environment_name @@ -32,27 +32,39 @@ module "account_map" { } module "vpc" { - for_each = local.accounts_with_vpc + for_each = { for c in local.vpc_connections : + (length(c.tenant) > 0 ? "${c.tenant}-${c.environment}-${c.stage}-${c.component}" : "${c.environment}-${c.stage}-${c.component}") + => c } source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" + + component = each.value.component + stage = each.value.stage + environment = each.value.environment + tenant = lookup(each.value, "tenant", null) - component = "vpc" - stage = each.value.stage - tenant = lookup(each.value, "tenant", null) + defaults = { + stage = each.value.stage + environment = each.value.environment + tenant = lookup(each.value, "tenant", null) + } context = module.this.context } module "eks" { - source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + for_each = { for c in local.eks_connections : + (length(c.tenant) > 0 ? 
"${c.tenant}-${c.environment}-${c.stage}-${c.component}" : "${c.environment}-${c.stage}-${c.component}") + => c } - for_each = local.eks_remote_states + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" - component = each.value.component - stage = try(split("-", each.value.account)[1], each.value.account) - tenant = try(split("-", each.value.account)[0], null) + component = each.value.component + stage = each.value.stage + environment = each.value.environment + tenant = lookup(each.value, "tenant", null) defaults = { eks_cluster_managed_security_group_id = null diff --git a/modules/tgw/hub/variables.tf b/modules/tgw/hub/variables.tf index 889ad0d16..13bae4927 100644 --- a/modules/tgw/hub/variables.tf +++ b/modules/tgw/hub/variables.tf @@ -3,26 +3,28 @@ variable "region" { description = "AWS Region" } -variable "accounts_with_vpc" { - type = set(string) - description = "Set of account names that have VPC" -} - -variable "accounts_with_eks" { - type = set(string) - description = "Set of account names that have EKS" -} - variable "expose_eks_sg" { type = bool description = "Set true to allow EKS clusters to accept traffic from source accounts" default = true } -variable "eks_component_names" { - type = set(string) - description = "The names of the eks components" - default = ["eks/cluster"] +variable "connections" { + type = list(object({ + account = object({ + stage = string + environment = optional(string, "") + tenant = optional(string, "") + }) + vpc_component_names = optional(list(string), ["vpc"]) + eks_component_names = optional(list(string), []) + })) + description = <<-EOT + A list of objects to define each TGW connections. + + By default, each connection will look for only the default `vpc` component. + EOT + default = [] } variable "account_map_environment_name" { @@ -46,3 +48,15 @@ variable "account_map_tenant_name" { EOT default = null } + +variable "ram_principals" { + type = list(string) + description = "A list of AWS account IDs to share the TGW with outside the organization" + default = [] +} + +variable "allow_external_principals" { + type = bool + description = "Set true to allow the TGW to be RAM shared with external principals specified in ram_principals" + default = false +} diff --git a/modules/tgw/hub/versions.tf b/modules/tgw/hub/versions.tf index 99bf30a36..f0e7120a6 100644 --- a/modules/tgw/hub/versions.tf +++ b/modules/tgw/hub/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.1" + version = ">= 4.1" } } } diff --git a/modules/tgw/spoke/README.md b/modules/tgw/spoke/README.md index 02b28ecb0..0134e689a 100644 --- a/modules/tgw/spoke/README.md +++ b/modules/tgw/spoke/README.md @@ -1,6 +1,14 @@ +--- +tags: + - component/tgw/spoke + - layer/network + - provider/aws +--- + # Component: `tgw/spoke` -This component is responsible for provisioning [AWS Transit Gateway](https://aws.amazon.com/transit-gateway) attachments to connect VPCs in a `spoke` account to different accounts through a central `hub`. +This component is responsible for provisioning [AWS Transit Gateway](https://aws.amazon.com/transit-gateway) attachments +to connect VPCs in a `spoke` account to different accounts through a central `hub`. 
## Usage @@ -17,25 +25,34 @@ components: metadata: type: abstract component: tgw/spoke - settings: - spacelift: - workspace_enabled: true vars: enabled: true name: tgw-spoke tags: Team: sre Service: tgw-spoke - root_account_tenant_name: core + expose_eks_sg: false + tgw_hub_tenant_name: core + tgw_hub_environment_name: ue1 tgw/spoke: metadata: inherits: - tgw/spoke-defaults vars: + # This is what THIS spoke is allowed to connect to. + # since this is deployed to each plat account (dev->prod), + # we allow connections to network and auto. connections: - - core-network - - core-auto + - account: + tenant: core + stage: network + # Set this value if the vpc component has a different name in this account + vpc_component_names: + - vpc-dev + - account: + tenant: core + stage: auto ``` stacks/ue2/dev.yaml @@ -52,10 +69,24 @@ components: expose_eks_sg: false # override default connections connections: - - core-network - - core-auto - - plat-staging - + - account: + tenant: core + stage: network + vpc_component_names: + - vpc-dev + - account: + tenant: core + stage: auto + - account: + tenant: plat + stage: dev + eks_component_names: + - eks/cluster + - account: + tenant: plat + stage: qa + eks_component_names: + - eks/cluster ``` To provision the attachments for a spoke account: @@ -65,32 +96,41 @@ atmos terraform plan tgw/spoke -s -- atmos terraform apply tgw/spoke -s -- ``` + ## Requirements | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.1 | +| [aws](#requirement\_aws) | >= 4.1 | ## Providers -No providers. +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 4.1 | +| [aws.tgw-hub](#provider\_aws.tgw-hub) | >= 4.1 | ## Modules | Name | Source | Version | |------|--------|---------| +| [cross\_region\_hub\_connector](#module\_cross\_region\_hub\_connector) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [iam\_roles](#module\_iam\_roles) | ../../account-map/modules/iam-roles | n/a | -| [tgw\_hub](#module\_tgw\_hub) | cloudposse/stack-config/yaml//modules/remote-state | 0.22.4 | +| [tgw\_hub](#module\_tgw\_hub) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [tgw\_hub\_role](#module\_tgw\_hub\_role) | ../../account-map/modules/iam-roles | n/a | -| [tgw\_hub\_routes](#module\_tgw\_hub\_routes) | cloudposse/transit-gateway/aws | 0.9.1 | +| [tgw\_hub\_routes](#module\_tgw\_hub\_routes) | cloudposse/transit-gateway/aws | 0.10.0 | | [tgw\_spoke\_vpc\_attachment](#module\_tgw\_spoke\_vpc\_attachment) | ./modules/standard_vpc_attachment | n/a | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | +| [vpc](#module\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources -No resources. +| Name | Type | +|------|------| +| [aws_route.back_route](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route) | resource | +| [aws_route.default_route](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route) | resource | ## Inputs @@ -98,40 +138,46 @@ No resources. |------|-------------|------|---------|:--------:| | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | -| [connections](#input\_connections) | List of accounts to connect to | `list(string)` | n/a | yes | +| [connections](#input\_connections) | A list of objects to define each TGW connection.

By default, each connection will look for only the default `vpc` component. |
list(object({
account = object({
stage = string
environment = optional(string, "")
tenant = optional(string, "")
})
vpc_component_names = optional(list(string), ["vpc"])
eks_component_names = optional(list(string), [])
}))
| `[]` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [cross\_region\_hub\_connector\_components](#input\_cross\_region\_hub\_connector\_components) | A map of cross-region hub connector components that provide this spoke with the appropriate Transit Gateway attachment IDs.
- The key should be the environment that the remote VPC is located in.
- The component is the name of the component in the remote region (e.g. `tgw/cross-region-hub-connector`)
- The environment is the region that the cross-region-hub-connector is deployed in.
e.g. the following would configure a component called `tgw/cross-region-hub-connector/use1` that is deployed in the
If use2 is the primary region, the following would be its configuration:
use1:
component: "tgw/cross-region-hub-connector"
environment: "use1" (the remote region)
and in the alternate region, the following would be its configuration:
use2:
component: "tgw/cross-region-hub-connector"
environment: "use1" (our own region) | `map(object({ component = string, environment = string }))` | `{}` | no | +| [default\_route\_enabled](#input\_default\_route\_enabled) | Enable default routing via transit gateway, requires also nat gateway and instance to be disabled in vpc component. Default is disabled. | `bool` | `false` | no | +| [default\_route\_outgoing\_account\_name](#input\_default\_route\_outgoing\_account\_name) | The account name which is used for outgoing traffic, when using the transit gateway as default route. | `string` | `null` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | -| [eks\_component\_names](#input\_eks\_component\_names) | The names of the eks components | `set(string)` |
[
"eks/cluster"
]
| no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [expose\_eks\_sg](#input\_expose\_eks\_sg) | Set true to allow EKS clusters to accept traffic from source accounts | `bool` | `true` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | +| [own\_eks\_component\_names](#input\_own\_eks\_component\_names) | The name of the eks components in the owning account. | `list(string)` | `[]` | no | +| [own\_vpc\_component\_name](#input\_own\_vpc\_component\_name) | The name of the vpc component in the owning account. Defaults to "vpc" | `string` | `"vpc"` | no | +| [peered\_region](#input\_peered\_region) | Set `true` if this region is not the primary region | `bool` | `false` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [static\_routes](#input\_static\_routes) | A list of static routes to add to the transit gateway, pointing at this VPC as a destination. |
set(object({
blackhole = bool
destination_cidr_block = string
}))
| `[]` | no | +| [static\_tgw\_routes](#input\_static\_tgw\_routes) | A list of static routes to add to the local routing table with the transit gateway as a destination. | `list(string)` | `[]` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | | [tgw\_hub\_component\_name](#input\_tgw\_hub\_component\_name) | The name of the transit-gateway component | `string` | `"tgw/hub"` | no | -| [tgw\_hub\_environment\_name](#input\_tgw\_hub\_environment\_name) | The name of the environment where `tgw/gateway` is provisioned | `string` | `"ue2"` | no | -| [tgw\_hub\_stage\_name](#input\_tgw\_hub\_stage\_name) | The name of the stage where `tgw/gateway` is provisioned | `string` | `"network"` | no | +| [tgw\_hub\_stage\_name](#input\_tgw\_hub\_stage\_name) | The name of the stage where `tgw/hub` is provisioned | `string` | `"network"` | no | | [tgw\_hub\_tenant\_name](#input\_tgw\_hub\_tenant\_name) | The name of the tenant where `tgw/hub` is provisioned.

If the `tenant` label is not used, leave this as `null`. | `string` | `null` | no | ## Outputs No outputs. + ## References -- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/tgw) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/tgw) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/tgw/spoke/default.auto.tfvars b/modules/tgw/spoke/default.auto.tfvars deleted file mode 100644 index 6ebfe4f02..000000000 --- a/modules/tgw/spoke/default.auto.tfvars +++ /dev/null @@ -1,5 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false - -name = "tgw-spoke" diff --git a/modules/tgw/spoke/main.tf b/modules/tgw/spoke/main.tf index ff1175909..42ad86c58 100644 --- a/modules/tgw/spoke/main.tf +++ b/modules/tgw/spoke/main.tf @@ -7,12 +7,14 @@ # https://docs.aws.amazon.com/ram/latest/userguide/getting-started-sharing.html locals { - spoke_account = module.this.tenant != null ? format("%s-%s", module.this.tenant, module.this.stage) : module.this.stage + spoke_account = module.this.tenant != null ? format("%s-%s-%s", module.this.tenant, module.this.environment, module.this.stage) : format("%s-%s", module.this.environment, module.this.stage) + // "When default routing via transit gateway is enabled, both nat gateway and nat instance must be disabled" + default_route_enabled_and_nat_disabled = module.this.enabled && var.default_route_enabled && length(module.vpc.outputs.nat_gateway_ids) == 0 && length(module.vpc.outputs.nat_instance_ids) == 0 } module "tgw_hub_routes" { source = "cloudposse/transit-gateway/aws" - version = "0.9.1" + version = "0.10.0" providers = { aws = aws.tgw-hub @@ -38,12 +40,40 @@ module "tgw_hub_routes" { module "tgw_spoke_vpc_attachment" { source = "./modules/standard_vpc_attachment" - owning_account = local.spoke_account + owning_account = local.spoke_account + own_vpc_component_name = var.own_vpc_component_name + own_eks_component_names = var.own_eks_component_names - tgw_config = module.tgw_hub.outputs.tgw_config - connections = var.connections - expose_eks_sg = var.expose_eks_sg - eks_component_names = var.eks_component_names + tgw_config = module.tgw_hub.outputs.tgw_config + tgw_connector_config = module.cross_region_hub_connector + connections = var.connections + expose_eks_sg = var.expose_eks_sg + peered_region = var.peered_region + static_routes = var.static_routes + static_tgw_routes = var.static_tgw_routes context = module.this.context } + +resource "aws_route" "default_route" { + count = local.default_route_enabled_and_nat_disabled ? length(module.vpc.outputs.private_route_table_ids) : 0 + + route_table_id = module.vpc.outputs.private_route_table_ids[count.index] + destination_cidr_block = "0.0.0.0/0" + transit_gateway_id = module.tgw_hub.outputs.transit_gateway_id +} + +locals { + outgoing_network_account_name = local.default_route_enabled_and_nat_disabled ? format("%s-%s", var.default_route_outgoing_account_name, var.own_vpc_component_name) : "" + default_route_vpc_public_route_table_ids = local.default_route_enabled_and_nat_disabled ? module.tgw_hub.outputs.vpcs[local.outgoing_network_account_name].outputs.public_route_table_ids : [] +} + +resource "aws_route" "back_route" { + provider = aws.tgw-hub + + count = local.default_route_enabled_and_nat_disabled ? 
length(local.default_route_vpc_public_route_table_ids) : 0 + + route_table_id = local.default_route_vpc_public_route_table_ids[count.index] + destination_cidr_block = module.vpc.outputs.vpc_cidr + transit_gateway_id = module.tgw_hub.outputs.transit_gateway_id +} diff --git a/modules/tgw/spoke/modules/standard_vpc_attachment/default.auto.tfvars b/modules/tgw/spoke/modules/standard_vpc_attachment/default.auto.tfvars deleted file mode 100644 index 67952b0d1..000000000 --- a/modules/tgw/spoke/modules/standard_vpc_attachment/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = true diff --git a/modules/tgw/spoke/modules/standard_vpc_attachment/main.tf b/modules/tgw/spoke/modules/standard_vpc_attachment/main.tf index ac5df9e8c..634975bc3 100644 --- a/modules/tgw/spoke/modules/standard_vpc_attachment/main.tf +++ b/modules/tgw/spoke/modules/standard_vpc_attachment/main.tf @@ -1,32 +1,124 @@ locals { - vpcs = var.tgw_config.vpcs - own_vpc = local.vpcs[var.owning_account].outputs - connected_accounts = var.connections - - # Create a list of all of the EKS security groups - own_eks_sgs = compact([ - for account_component in setproduct([var.owning_account], var.eks_component_names) : - try(var.tgw_config.eks[join("-", account_component)].outputs.eks_cluster_managed_security_group_id, "") - ]) + vpcs = var.tgw_config.vpcs + eks = var.tgw_config.eks - # Create a map of accounts (- or ) and the security group to add ingress rules for - connected_accounts_allow_ingress = { - for account_sg in setproduct(local.connected_accounts, local.own_eks_sgs) : - account_sg[0] => { - account = account_sg[0] - sg = account_sg[1] - } + own_account_vpc_key = "${var.owning_account}-${var.own_vpc_component_name}" + own_vpc = local.vpcs[local.own_account_vpc_key].outputs + is_network_hub = (module.this.stage == var.network_account_stage_name) ? true : false + + # Create a list of all VPC component keys. Key includes stack + component + # + # Example var.connections + # connections: + # - account: + # tenant: core + # stage: network + # vpc_component_names: + # - vpc-dev + # - account: + # tenant: core + # stage: auto + # - account: + # tenant: plat + # stage: dev + # - account: + # tenant: plat + # stage: dev + # environment: usw2 + connected_vpc_component_keys = flatten( + [ + for c in var.connections : + [ + # Default value for c.vpc_component_names is ["vpc"] + for vpc in c.vpc_component_names : + # This component key needs to match the key created by tgw/hub + # See components/terraform/tgw/hub/remote-state.tf + length(c.account.environment) > 0 ? + (length(c.account.tenant) > 0 ? + "${c.account.tenant}-${c.account.environment}-${c.account.stage}-${vpc}" : + "${c.account.environment}-${c.account.stage}-${vpc}") + : + (length(c.account.tenant) > 0 ? + "${c.account.tenant}-${module.this.environment}-${c.account.stage}-${vpc}" : + "${module.this.environment}-${c.account.stage}-${vpc}") + ] + ] + ) + + # Create a list of all EKS component keys. + # Follows same pattern as vpc_component_names + connected_eks_component_keys = flatten( + [ + for c in var.connections : + [ + for eks in c.eks_component_names : + length(c.account.environment) > 0 ? + (length(c.account.tenant) > 0 ? + "${c.account.tenant}-${c.account.environment}-${c.account.stage}-${eks}" : + "${c.account.environment}-${c.account.stage}-${eks}") + : + (length(c.account.tenant) > 0 ? 
+ "${c.account.tenant}-${module.this.environment}-${c.account.stage}-${eks}" : + "${module.this.environment}-${c.account.stage}-${eks}") + ] + ] + ) + + # Define a list of all VPCs allowed to access this account's VPC. + # Filter the tgw_config output from tgw/hub for VPCs and pull the CIDR of a VPC if + # (1) this is not the primary VPC that we are connecting to and (2) this VPC key is given as a connection + allowed_vpcs = { + for vpc_key, vpc_remote_state in local.vpcs : + vpc_key => { + cidr = vpc_remote_state.outputs.vpc_cidr + cross_region = (vpc_remote_state.outputs.environment != module.this.environment) + environment = vpc_remote_state.outputs.environment + } if vpc_key != local.own_account_vpc_key && contains(local.connected_vpc_component_keys, vpc_key) } - allowed_cidrs = [ - for k, v in local.vpcs : v.outputs.vpc_cidr - if contains(local.connected_accounts, k) && k != var.owning_account - ] + # For each EKS cluster in this account, map the EKS SG to the CIDR for each connected cluster + allowed_eks = merge([ + for own_eks_component in var.own_eks_component_names : + { + for eks_key, eks_remote_state in local.eks : + eks_key => { + # SG of each EKS component in this account + sg_id = local.eks["${var.owning_account}-${own_eks_component}"].outputs.eks_cluster_managed_security_group_id + # CIDR of the remote EKS cluster + cidr = eks_remote_state.outputs.vpc_cidr + } if contains(local.connected_eks_component_keys, eks_key) + } + ]...) + + cross_region_vpcs = flatten([ + for vpc_key, vpc in local.allowed_vpcs : [ + { + vpc_key = vpc_key + cidr = vpc.cidr + environment = vpc.environment + } + ] if vpc.cross_region + ]) + + cross_region_vpc_route_table_ids = flatten([ + for vpc_key, vpc in local.allowed_vpcs : [ + for route_table_key, route_table_id in local.own_vpc.private_route_table_ids : [ + { + vpc_key = vpc_key + rt_key = route_table_key + cidr = vpc.cidr + route_table_id = route_table_id + } + ] + ] if vpc.cross_region + ]) } +# Create a TGW attachment from this account's VPC to the TGW Hub +# This includes a merged list of all CIDRs from allowed VPCs in connected accounts module "standard_vpc_attachment" { source = "cloudposse/transit-gateway/aws" - version = "0.9.1" + version = "0.11.0" existing_transit_gateway_id = var.tgw_config.existing_transit_gateway_id existing_transit_gateway_route_table_id = var.tgw_config.existing_transit_gateway_route_table_id @@ -44,23 +136,53 @@ module "standard_vpc_attachment" { subnet_ids = local.own_vpc.private_subnet_ids subnet_route_table_ids = local.own_vpc.private_route_table_ids route_to = null - static_routes = null + static_routes = var.static_routes transit_gateway_vpc_attachment_id = null - route_to_cidr_blocks = local.allowed_cidrs + route_to_cidr_blocks = concat([for vpc in local.allowed_vpcs : vpc.cidr if !vpc.cross_region], var.static_tgw_routes) } } context = module.this.context } +# Create a TGW attachment for a Peering Connection +# This in only necessary in the hub accounts +resource "aws_ec2_transit_gateway_route" "peering_connection" { + for_each = local.is_network_hub ? { + for vpc in local.cross_region_vpcs : vpc.cidr => vpc + } : {} + + # Use the TGW Attachment in the alternate, peered region + transit_gateway_attachment_id = var.peered_region ? 
var.tgw_connector_config[local.own_vpc.environment].outputs.aws_ec2_transit_gateway_peering_attachment_id : var.tgw_connector_config[each.value.environment].outputs.aws_ec2_transit_gateway_peering_attachment_id + + blackhole = false + destination_cidr_block = each.value.cidr + transit_gateway_route_table_id = var.tgw_config.existing_transit_gateway_route_table_id +} + +# Route this VPC to the destination CIDR +# This is only necessary in cross-region connections +resource "aws_route" "peering_connection" { + for_each = { + for vpc_rt in local.cross_region_vpc_route_table_ids : "${vpc_rt.route_table_id}:${vpc_rt.cidr}" => vpc_rt + } + + transit_gateway_id = var.tgw_config.existing_transit_gateway_id + + route_table_id = each.value.route_table_id + destination_cidr_block = each.value.cidr +} + +# Define a Security Group Rule to allow traffic from +# Expose traffic from EKS VPC CIDRs in other accounts to this accounts EKS cluster SG resource "aws_security_group_rule" "ingress_cidr_blocks" { - for_each = var.expose_eks_sg ? local.connected_accounts_allow_ingress : {} + for_each = var.expose_eks_sg ? local.allowed_eks : {} description = "Allow inbound traffic from ${each.key}" type = "ingress" from_port = 0 to_port = 65535 protocol = "tcp" - cidr_blocks = [local.vpcs[each.value.account].outputs.vpc_cidr] - security_group_id = each.value.sg + cidr_blocks = [each.value.cidr] # CIDR of cluster in other accounts + security_group_id = each.value.sg_id # SG of cluster in this account } diff --git a/modules/tgw/spoke/modules/standard_vpc_attachment/outputs.tf b/modules/tgw/spoke/modules/standard_vpc_attachment/outputs.tf index dca466b3c..3c21a6725 100644 --- a/modules/tgw/spoke/modules/standard_vpc_attachment/outputs.tf +++ b/modules/tgw/spoke/modules/standard_vpc_attachment/outputs.tf @@ -7,7 +7,8 @@ output "tg_config" { subnet_route_table_ids = null route_to = null route_to_cidr_blocks = null - static_routes = null + static_routes = var.static_routes transit_gateway_vpc_attachment_id = module.standard_vpc_attachment.transit_gateway_vpc_attachment_ids[var.owning_account] } + description = "Transit Gateway configuration formatted for handling" } diff --git a/modules/tgw/spoke/modules/standard_vpc_attachment/variables.tf b/modules/tgw/spoke/modules/standard_vpc_attachment/variables.tf index d8c88fb3b..8722bbd02 100644 --- a/modules/tgw/spoke/modules/standard_vpc_attachment/variables.tf +++ b/modules/tgw/spoke/modules/standard_vpc_attachment/variables.tf @@ -4,6 +4,18 @@ variable "owning_account" { description = "The name of the account that owns the VPC being attached" } +variable "own_vpc_component_name" { + type = string + default = "vpc" + description = "The name of the vpc component in the owning account. Defaults to \"vpc\"" +} + +variable "own_eks_component_names" { + type = list(string) + default = [] + description = "The name of the eks components in the owning account." +} + variable "tgw_config" { type = object({ existing_transit_gateway_id = string @@ -14,9 +26,45 @@ variable "tgw_config" { description = "Object to pass common data from root module to this submodule. See root module for details" } +variable "tgw_connector_config" { + type = map(any) + description = "Map of output from all `tgw/cross-region-hub-connector` components. 
See root module for details" + default = {} +} + variable "connections" { + type = list(object({ + account = object({ + stage = string + environment = optional(string, "") + tenant = optional(string, "") + }) + vpc_component_names = optional(list(string), ["vpc"]) + eks_component_names = optional(list(string), []) + })) + description = <<-EOT + A list of objects to define each TGW connections. + + By default, each connection will look for only the default `vpc` component. + EOT + default = [] +} + +variable "static_routes" { + type = set(object({ + blackhole = bool + destination_cidr_block = string + })) + description = <<-EOT + A list of static routes. + EOT + default = [] +} + +variable "static_tgw_routes" { type = list(string) - description = "List of accounts to connect to" + description = "A list of static routes to add to the local routing table with the transit gateway as a destination." + default = [] } variable "expose_eks_sg" { @@ -25,8 +73,14 @@ variable "expose_eks_sg" { default = true } -variable "eks_component_names" { - type = set(string) - description = "The names of the eks components" - default = ["eks/cluster"] +variable "peered_region" { + type = bool + description = "Set `true` if this region is not the primary region" + default = false +} + +variable "network_account_stage_name" { + type = string + description = "The name of the stage designated as the network hub" + default = "network" } diff --git a/modules/tgw/spoke/modules/standard_vpc_attachment/versions.tf b/modules/tgw/spoke/modules/standard_vpc_attachment/versions.tf index e89eb16ed..f33ede77f 100644 --- a/modules/tgw/spoke/modules/standard_vpc_attachment/versions.tf +++ b/modules/tgw/spoke/modules/standard_vpc_attachment/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 4.0" } } } diff --git a/modules/tgw/spoke/provider-hub.tf b/modules/tgw/spoke/provider-hub.tf new file mode 100644 index 000000000..a1f429f85 --- /dev/null +++ b/modules/tgw/spoke/provider-hub.tf @@ -0,0 +1,24 @@ +provider "aws" { + alias = "tgw-hub" + region = var.region + + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.tgw_hub_role.terraform_profile_name + + dynamic "assume_role" { + # module.tgw_hub_role.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.tgw_hub_role.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } +} + +module "tgw_hub_role" { + source = "../../account-map/modules/iam-roles" + + stage = var.tgw_hub_stage_name + tenant = var.tgw_hub_tenant_name + + context = module.this.context +} diff --git a/modules/tgw/spoke/providers.tf b/modules/tgw/spoke/providers.tf index bfa49d241..89ed50a98 100644 --- a/modules/tgw/spoke/providers.tf +++ b/modules/tgw/spoke/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,56 +17,3 @@ module "iam_roles" { source = "../../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} - -provider "aws" { - alias = "tgw-hub" - region = var.region - - assume_role { - role_arn = coalesce(var.import_role_arn, module.tgw_hub_role.terraform_role_arn) - } -} - -variable "tgw_hub_environment_name" { - type = string - description = "The name of the environment where `tgw/gateway` is provisioned" - default = "ue2" -} - -variable "tgw_hub_stage_name" { - type = string - description = "The name of the stage where `tgw/gateway` is provisioned" - default = "network" -} - -variable "tgw_hub_tenant_name" { - type = string - description = <<-EOT - The name of the tenant where `tgw/hub` is provisioned. - - If the `tenant` label is not used, leave this as `null`. - EOT - default = null -} - -module "tgw_hub_role" { - source = "../../account-map/modules/iam-roles" - - stage = var.tgw_hub_stage_name - environment = var.tgw_hub_environment_name - tenant = var.tgw_hub_tenant_name - - context = module.this.context -} diff --git a/modules/tgw/spoke/remote-state.tf b/modules/tgw/spoke/remote-state.tf index a66b0cd9f..709e9222d 100644 --- a/modules/tgw/spoke/remote-state.tf +++ b/modules/tgw/spoke/remote-state.tf @@ -1,11 +1,37 @@ module "tgw_hub" { source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "0.22.4" + version = "1.5.0" - component = var.tgw_hub_component_name - stage = var.tgw_hub_stage_name - environment = var.tgw_hub_environment_name - tenant = var.tgw_hub_tenant_name + component = var.tgw_hub_component_name + tenant = length(var.tgw_hub_tenant_name) > 0 ? var.tgw_hub_tenant_name : module.this.tenant + stage = length(var.tgw_hub_stage_name) > 0 ? var.tgw_hub_stage_name : module.this.stage + + context = module.this.context +} + +module "vpc" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + component = var.own_vpc_component_name + + context = module.this.context +} + +module "cross_region_hub_connector" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + for_each = var.cross_region_hub_connector_components + + component = each.value.component + tenant = length(var.tgw_hub_tenant_name) > 0 ? var.tgw_hub_tenant_name : module.this.tenant + stage = length(var.tgw_hub_stage_name) > 0 ? 
var.tgw_hub_stage_name : module.this.stage + environment = each.value.environment + + # Ignore if hub connector doesnt exist (it doesnt exist in primary region) + ignore_errors = true + defaults = {} context = module.this.context } diff --git a/modules/tgw/spoke/variables.tf b/modules/tgw/spoke/variables.tf index a34bd9890..a4eec4e41 100644 --- a/modules/tgw/spoke/variables.tf +++ b/modules/tgw/spoke/variables.tf @@ -4,8 +4,21 @@ variable "region" { } variable "connections" { - type = list(string) - description = "List of accounts to connect to" + type = list(object({ + account = object({ + stage = string + environment = optional(string, "") + tenant = optional(string, "") + }) + vpc_component_names = optional(list(string), ["vpc"]) + eks_component_names = optional(list(string), []) + })) + description = <<-EOT + A list of objects to define each TGW connections. + + By default, each connection will look for only the default `vpc` component. + EOT + default = [] } variable "tgw_hub_component_name" { @@ -14,14 +27,89 @@ variable "tgw_hub_component_name" { default = "tgw/hub" } +variable "tgw_hub_stage_name" { + type = string + description = "The name of the stage where `tgw/hub` is provisioned" + default = "network" +} + +variable "tgw_hub_tenant_name" { + type = string + description = <<-EOT + The name of the tenant where `tgw/hub` is provisioned. + + If the `tenant` label is not used, leave this as `null`. + EOT + default = null +} + variable "expose_eks_sg" { type = bool description = "Set true to allow EKS clusters to accept traffic from source accounts" default = true } -variable "eks_component_names" { - type = set(string) - description = "The names of the eks components" - default = ["eks/cluster"] +variable "own_vpc_component_name" { + type = string + default = "vpc" + description = "The name of the vpc component in the owning account. Defaults to \"vpc\"" +} + +variable "own_eks_component_names" { + type = list(string) + default = [] + description = "The name of the eks components in the owning account." +} + +variable "peered_region" { + type = bool + description = "Set `true` if this region is not the primary region" + default = false +} + +variable "static_routes" { + type = set(object({ + blackhole = bool + destination_cidr_block = string + })) + description = "A list of static routes to add to the transit gateway, pointing at this VPC as a destination." + default = [] +} + +variable "static_tgw_routes" { + type = list(string) + description = "A list of static routes to add to the local routing table with the transit gateway as a destination." + default = [] +} + +variable "default_route_enabled" { + type = bool + description = "Enable default routing via transit gateway, requires also nat gateway and instance to be disabled in vpc component. Default is disabled." + default = false +} + +variable "default_route_outgoing_account_name" { + type = string + description = "The account name which is used for outgoing traffic, when using the transit gateway as default route." + default = null +} + +variable "cross_region_hub_connector_components" { + type = map(object({ component = string, environment = string })) + description = <<-EOT + A map of cross-region hub connector components that provide this spoke with the appropriate Transit Gateway attachments IDs. + - The key should be the environment that the remote VPC is located in. + - The component is the name of the component in the remote region (e.g. 
`tgw/cross-region-hub-connector`) + - The environment is the region that the cross-region-hub-connector is deployed in. + e.g. the following would configure a component called `tgw/cross-region-hub-connector/use1` that is deployed in the + If use2 is the primary region, the following would be its configuration: + use1: + component: "tgw/cross-region-hub-connector" + environment: "use1" (the remote region) + and in the alternate region, the following would be its configuration: + use2: + component: "tgw/cross-region-hub-connector" + environment: "use1" (our own region) + EOT + default = {} } diff --git a/modules/tgw/spoke/versions.tf b/modules/tgw/spoke/versions.tf index 99bf30a36..f0e7120a6 100644 --- a/modules/tgw/spoke/versions.tf +++ b/modules/tgw/spoke/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.1" + version = ">= 4.1" } } } diff --git a/modules/vpc-flow-logs-bucket/README.md b/modules/vpc-flow-logs-bucket/README.md index 9c4e91fc1..f831f858c 100644 --- a/modules/vpc-flow-logs-bucket/README.md +++ b/modules/vpc-flow-logs-bucket/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/vpc-flow-logs-bucket + - layer/network + - provider/aws +--- + # Component: `vpc-flow-logs-bucket` This component is responsible for provisioning an encrypted S3 bucket which is configured to receive VPC Flow Logs. @@ -8,7 +15,8 @@ This component is responsible for provisioning an encrypted S3 bucket which is c Here's an example snippet for how to use this component. -**IMPORTANT**: This component expects the `aws_flow_log` resource to be created externally. Typically that is accomplished through [the `vpc` component](../vpc/). +**IMPORTANT**: This component expects the `aws_flow_log` resource to be created externally. Typically that is +accomplished through [the `vpc` component](../vpc/). ```yaml components: @@ -23,15 +31,14 @@ components: expiration_days: 365 ``` + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 0.13.0 | -| [aws](#requirement\_aws) | >= 3.0 | -| [local](#requirement\_local) | >= 1.3 | -| [template](#requirement\_template) | >= 2.0 | +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | >= 4.9.0 | ## Providers @@ -41,9 +48,9 @@ No providers. | Name | Source | Version | |------|--------|---------| -| [flow\_logs\_s3\_bucket](#module\_flow\_logs\_s3\_bucket) | cloudposse/vpc-flow-logs-s3-bucket/aws | 0.12.0 | +| [flow\_logs\_s3\_bucket](#module\_flow\_logs\_s3\_bucket) | cloudposse/vpc-flow-logs-s3-bucket/aws | 1.0.1 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [this](#module\_this) | cloudposse/label/null | 0.24.1 | +| [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources @@ -53,33 +60,34 @@ No resources. | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional tags for appending to tags\_as\_list\_of\_maps. Not added to `tags`. | `map(string)` | `{}` | no | -| [arn\_format](#input\_arn\_format) | ARN format to be used. May be changed to support deployment in GovCloud/China regions | `string` | `"arn:aws"` | no | -| [attributes](#input\_attributes) | Additional attributes (e.g. `1`) | `list(string)` | `[]` | no | -| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {}
}
| no | -| [delimiter](#input\_delimiter) | Delimiter to be used between `namespace`, `environment`, `stage`, `name` and `attributes`.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | +| [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | +| [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | -| [environment](#input\_environment) | Environment, e.g. 'uw2', 'us-west-2', OR 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | +| [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [expiration\_days](#input\_expiration\_days) | Number of days after which to expunge the objects | `number` | `90` | no | | [force\_destroy](#input\_force\_destroy) | A boolean that indicates all objects should be deleted from the bucket so that the bucket can be destroyed without error. These objects are not recoverable | `bool` | `false` | no | | [glacier\_transition\_days](#input\_glacier\_transition\_days) | Number of days after which to move the data to the glacier storage tier | `number` | `60` | no | -| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for default, which is `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile to use when importing a resource | `string` | `null` | no | -| [label\_key\_case](#input\_label\_key\_case) | The letter case of label keys (`tag` names) (i.e. `name`, `namespace`, `environment`, `stage`, `attributes`) to use in `tags`.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | -| [label\_order](#input\_label\_order) | The naming order of the id output and Name tag.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 5 elements, but at least one must be present. | `list(string)` | `null` | no | -| [label\_value\_case](#input\_label\_value\_case) | The letter case of output label values (also used in `tags` and `id`).
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Default value: `lower`. | `string` | `null` | no | +| [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | +| [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | +| [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | +| [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | +| [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | | [lifecycle\_prefix](#input\_lifecycle\_prefix) | Prefix filter. Used to manage object lifecycle events | `string` | `""` | no | | [lifecycle\_rule\_enabled](#input\_lifecycle\_rule\_enabled) | Enable lifecycle events on this bucket | `bool` | `true` | no | | [lifecycle\_tags](#input\_lifecycle\_tags) | Tags filter. Used to manage object lifecycle events | `map(string)` | `{}` | no | -| [name](#input\_name) | Solution name, e.g. 'app' or 'jenkins' | `string` | `null` | no | -| [namespace](#input\_namespace) | Namespace, which could be your organization name or abbreviation, e.g. 'eg' or 'cp' | `string` | `null` | no | +| [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | +| [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | | [noncurrent\_version\_expiration\_days](#input\_noncurrent\_version\_expiration\_days) | Specifies when noncurrent object versions expire | `number` | `90` | no | | [noncurrent\_version\_transition\_days](#input\_noncurrent\_version\_transition\_days) | Specifies when noncurrent object versions transitions | `number` | `30` | no | -| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Regex to replace chars with empty string in `namespace`, `environment`, `stage` and `name`.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | +| [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | -| [stage](#input\_stage) | Stage, e.g. 'prod', 'staging', 'dev', OR 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | +| [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [standard\_transition\_days](#input\_standard\_transition\_days) | Number of days to persist in the standard storage tier before moving to the infrequent access tier | `number` | `30` | no | -| [tags](#input\_tags) | Additional tags (e.g. `map('BusinessUnit','XYZ')` | `map(string)` | `{}` | no | +| [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | +| [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | | [traffic\_type](#input\_traffic\_type) | The type of traffic to capture. Valid values: `ACCEPT`, `REJECT`, `ALL` | `string` | `"ALL"` | no | ## Outputs @@ -89,10 +97,11 @@ No resources. | [vpc\_flow\_logs\_bucket\_arn](#output\_vpc\_flow\_logs\_bucket\_arn) | VPC Flow Logs bucket ARN | | [vpc\_flow\_logs\_bucket\_id](#output\_vpc\_flow\_logs\_bucket\_id) | VPC Flow Logs bucket ID | - + ## References - * [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/vpc-flow-logs-bucket) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/vpc-flow-logs-bucket) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/vpc-flow-logs-bucket/context.tf b/modules/vpc-flow-logs-bucket/context.tf index 81f99b4e3..5e0ef8856 100644 --- a/modules/vpc-flow-logs-bucket/context.tf +++ b/modules/vpc-flow-logs-bucket/context.tf @@ -8,6 +8,8 @@ # Cloud Posse's standard configuration inputs suitable for passing # to Cloud Posse modules. # +# curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf +# # Modules should access the whole context as `module.this.context` # to get the input variables with nulls for defaults, # for example `context = module.this.context`, @@ -20,10 +22,11 @@ module "this" { source = "cloudposse/label/null" - version = "0.24.1" # requires Terraform >= 0.13.0 + version = "0.25.0" # requires Terraform >= 0.13.0 enabled = var.enabled namespace = var.namespace + tenant = var.tenant environment = var.environment stage = var.stage name = var.name @@ -36,6 +39,8 @@ module "this" { id_length_limit = var.id_length_limit label_key_case = var.label_key_case label_value_case = var.label_value_case + descriptor_formats = var.descriptor_formats + labels_as_tags = var.labels_as_tags context = var.context } @@ -47,6 +52,7 @@ variable "context" { default = { enabled = true namespace = null + tenant = null environment = null stage = null name = null @@ -59,6 +65,15 @@ variable "context" { id_length_limit = null label_key_case = null label_value_case = null + descriptor_formats = {} + # Note: we have to use [] instead of null for unset lists due to + # https://github.com/hashicorp/terraform/issues/28137 + # which was not fixed until Terraform 1.0.0, + # but we want the default to be all the labels in `label_order` + # and we want users to be able to prevent all tag generation + # by setting `labels_as_tags` to `[]`, so we need + # a different sentinel to indicate "default" + labels_as_tags = ["unset"] } description = <<-EOT Single object for setting entire context at once. @@ -88,32 +103,42 @@ variable "enabled" { variable "namespace" { type = string default = null - description = "Namespace, which could be your organization name or abbreviation, e.g. 'eg' or 'cp'" + description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique" +} + +variable "tenant" { + type = string + default = null + description = "ID element _(Rarely used, not included by default)_. 
A customer identifier, indicating who this instance of a resource is for" } variable "environment" { type = string default = null - description = "Environment, e.g. 'uw2', 'us-west-2', OR 'prod', 'staging', 'dev', 'UAT'" + description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'" } variable "stage" { type = string default = null - description = "Stage, e.g. 'prod', 'staging', 'dev', OR 'source', 'build', 'test', 'deploy', 'release'" + description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'" } variable "name" { type = string default = null - description = "Solution name, e.g. 'app' or 'jenkins'" + description = <<-EOT + ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'. + This is the only ID element not also included as a `tag`. + The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. + EOT } variable "delimiter" { type = string default = null description = <<-EOT - Delimiter to be used between `namespace`, `environment`, `stage`, `name` and `attributes`. + Delimiter to be used between ID elements. Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. EOT } @@ -121,36 +146,64 @@ variable "delimiter" { variable "attributes" { type = list(string) default = [] - description = "Additional attributes (e.g. `1`)" + description = <<-EOT + ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`, + in the order they appear in the list. New attributes are appended to the + end of the list. The elements of the list are joined by the `delimiter` + and treated as a single ID element. + EOT +} + +variable "labels_as_tags" { + type = set(string) + default = ["default"] + description = <<-EOT + Set of labels (ID elements) to include as tags in the `tags` output. + Default is to include all labels. + Tags with empty values will not be included in the `tags` output. + Set to `[]` to suppress all generated tags. + **Notes:** + The value of the `name` tag, if included, will be the `id`, not the `name`. + Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be + changed in later chained modules. Attempts to change it will be silently ignored. + EOT } variable "tags" { type = map(string) default = {} - description = "Additional tags (e.g. `map('BusinessUnit','XYZ')`" + description = <<-EOT + Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`). + Neither the tag keys nor the tag values will be modified by this module. + EOT } variable "additional_tag_map" { type = map(string) default = {} - description = "Additional tags for appending to tags_as_list_of_maps. Not added to `tags`." + description = <<-EOT + Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`. + This is for some rare cases where resources want additional configuration of tags + and therefore take a list of maps with tag key, value, and additional configuration. + EOT } variable "label_order" { type = list(string) default = null description = <<-EOT - The naming order of the id output and Name tag. + The order in which the labels (ID elements) appear in the `id`. Defaults to ["namespace", "environment", "stage", "name", "attributes"]. - You can omit any of the 5 elements, but at least one must be present. - EOT + You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. 
+ EOT } variable "regex_replace_chars" { type = string default = null description = <<-EOT - Regex to replace chars with empty string in `namespace`, `environment`, `stage` and `name`. + Terraform regular expression (regex) string. + Characters matching the regex will be removed from the ID elements. If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. EOT } @@ -161,7 +214,7 @@ variable "id_length_limit" { description = <<-EOT Limit `id` to this many characters (minimum 6). Set to `0` for unlimited length. - Set to `null` for default, which is `0`. + Set to `null` for keep the existing setting, which defaults to `0`. Does not affect `id_full`. EOT validation { @@ -174,7 +227,8 @@ variable "label_key_case" { type = string default = null description = <<-EOT - The letter case of label keys (`tag` names) (i.e. `name`, `namespace`, `environment`, `stage`, `attributes`) to use in `tags`. + Controls the letter case of the `tags` keys (label names) for tags generated by this module. + Does not affect keys of tags passed in via the `tags` input. Possible values: `lower`, `title`, `upper`. Default value: `title`. EOT @@ -189,8 +243,11 @@ variable "label_value_case" { type = string default = null description = <<-EOT - The letter case of output label values (also used in `tags` and `id`). + Controls the letter case of ID elements (labels) as included in `id`, + set as tag values, and output by this module individually. + Does not affect values of tags passed in via the `tags` input. Possible values: `lower`, `title`, `upper` and `none` (no transformation). + Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs. Default value: `lower`. EOT @@ -199,4 +256,24 @@ variable "label_value_case" { error_message = "Allowed values: `lower`, `title`, `upper`, `none`." } } + +variable "descriptor_formats" { + type = any + default = {} + description = <<-EOT + Describe additional descriptors to be output in the `descriptors` output map. + Map of maps. Keys are names of descriptors. Values are maps of the form + `{ + format = string + labels = list(string) + }` + (Type is `any` so the map values can later be enhanced to provide additional options.) + `format` is a Terraform format string to be passed to the `format()` function. + `labels` is a list of labels, in order, to pass to `format()` function. + Label values will be normalized before being passed to `format()` so they will be + identical to how they appear in `id`. + Default is `{}` (`descriptors` output will be empty). 
+ EOT +} + #### End of copy of cloudposse/terraform-null-label/variables.tf diff --git a/modules/vpc-flow-logs-bucket/default.auto.tfvars b/modules/vpc-flow-logs-bucket/default.auto.tfvars deleted file mode 100644 index 67952b0d1..000000000 --- a/modules/vpc-flow-logs-bucket/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = true diff --git a/modules/vpc-flow-logs-bucket/main.tf b/modules/vpc-flow-logs-bucket/main.tf index 22486216e..88eaa98fe 100644 --- a/modules/vpc-flow-logs-bucket/main.tf +++ b/modules/vpc-flow-logs-bucket/main.tf @@ -1,6 +1,6 @@ module "flow_logs_s3_bucket" { source = "cloudposse/vpc-flow-logs-s3-bucket/aws" - version = "0.12.0" + version = "1.0.1" lifecycle_prefix = var.lifecycle_prefix lifecycle_tags = var.lifecycle_tags @@ -12,7 +12,6 @@ module "flow_logs_s3_bucket" { expiration_days = var.expiration_days traffic_type = var.traffic_type force_destroy = var.force_destroy - arn_format = var.arn_format flow_log_enabled = false context = module.this.context diff --git a/modules/vpc-flow-logs-bucket/providers.tf b/modules/vpc-flow-logs-bucket/providers.tf index 1aa5d23ec..ef923e10a 100644 --- a/modules/vpc-flow-logs-bucket/providers.tf +++ b/modules/vpc-flow-logs-bucket/providers.tf @@ -1,18 +1,19 @@ provider "aws" { region = var.region - # `terraform import` will not use data from a data source, so on import we have to explicitly specify the profile - profile = coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } + } } module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile to use when importing a resource" -} - diff --git a/modules/vpc-flow-logs-bucket/variables.tf b/modules/vpc-flow-logs-bucket/variables.tf index f7b9dda6d..6b87353ed 100644 --- a/modules/vpc-flow-logs-bucket/variables.tf +++ b/modules/vpc-flow-logs-bucket/variables.tf @@ -62,9 +62,3 @@ variable "traffic_type" { description = "The type of traffic to capture. Valid values: `ACCEPT`, `REJECT`, `ALL`" default = "ALL" } - -variable "arn_format" { - type = string - default = "arn:aws" - description = "ARN format to be used. 
May be changed to support deployment in GovCloud/China regions" -} diff --git a/modules/vpc-flow-logs-bucket/versions.tf b/modules/vpc-flow-logs-bucket/versions.tf index 4076d48ca..cc73ffd35 100644 --- a/modules/vpc-flow-logs-bucket/versions.tf +++ b/modules/vpc-flow-logs-bucket/versions.tf @@ -1,18 +1,10 @@ terraform { - required_version = ">= 0.13.0" + required_version = ">= 1.0.0" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.0" - } - template = { - source = "hashicorp/template" - version = ">= 2.0" - } - local = { - source = "hashicorp/local" - version = ">= 1.3" + version = ">= 4.9.0" } } } diff --git a/modules/vpc-peering/README.md b/modules/vpc-peering/README.md index 41a69c6fa..fa3532890 100644 --- a/modules/vpc-peering/README.md +++ b/modules/vpc-peering/README.md @@ -1,3 +1,10 @@ +--- +tags: + - component/vpc-peering + - layer/network + - provider/aws +--- + # Component: `vpc-peering` This component is responsible for creating a peering connection between two VPCs existing in different AWS accounts. @@ -50,42 +57,43 @@ components: Use case: Peering v2 accounts to v2 ```yaml - vpc-peering/-vpc0: - metadata: - component: vpc-peering - inherits: - - vpc-peering/defaults - vars: - requester_vpc_component_name: vpc - accepter_region: us-east-1 - accepter_stage_name: - accepter_vpc: - tags: - # Fill in with your own information - Name: acme---- +vpc-peering/-vpc0: + metadata: + component: vpc-peering + inherits: + - vpc-peering/defaults + vars: + requester_vpc_component_name: vpc + accepter_region: us-east-1 + accepter_stage_name: + accepter_vpc: + tags: + # Fill in with your own information + Name: acme---- ``` ## Legacy Account Configuration The `vpc-peering` component peers the `dev`, `prod`, `sandbox` and `staging` VPCs to a VPC in the legacy account. -The `dev`, `prod`, `sandbox` and `staging` VPCs are the requesters of the VPC peering connection, -while the legacy VPC is the accepter of the peering connection. +The `dev`, `prod`, `sandbox` and `staging` VPCs are the requesters of the VPC peering connection, while the legacy VPC +is the accepter of the peering connection. -To provision VPC peering and all related resources with Terraform, we need the following information from the legacy account: - - - Legacy account ID - - Legacy VPC ID - - Legacy AWS region - - Legacy IAM role (the role must be created in the legacy account with permissions to create VPC peering and routes). - The name of the role could be `acme-vpc-peering` and the ARN of the role should look like `arn:aws:iam:::role/acme-vpc-peering` +To provision VPC peering and all related resources with Terraform, we need the following information from the legacy +account: +- Legacy account ID +- Legacy VPC ID +- Legacy AWS region +- Legacy IAM role (the role must be created in the legacy account with permissions to create VPC peering and routes). + The name of the role could be `acme-vpc-peering` and the ARN of the role should look like + `arn:aws:iam:::role/acme-vpc-peering` ### Legacy Account IAM Role In the legacy account, create IAM role `acme-vpc-peering` with the following policy: -__NOTE:__ Replace `` with the ID of the legacy account. +**NOTE:** Replace `` with the ID of the legacy account. ```json { @@ -93,10 +101,7 @@ __NOTE:__ Replace `` with the ID of the legacy account. 
"Statement": [ { "Effect": "Allow", - "Action": [ - "ec2:CreateRoute", - "ec2:DeleteRoute" - ], + "Action": ["ec2:CreateRoute", "ec2:DeleteRoute"], "Resource": "arn:aws:ec2:*::route-table/*" }, { @@ -126,10 +131,7 @@ __NOTE:__ Replace `` with the ID of the legacy account. }, { "Effect": "Allow", - "Action": [ - "ec2:DeleteTags", - "ec2:CreateTags" - ], + "Action": ["ec2:DeleteTags", "ec2:CreateTags"], "Resource": "arn:aws:ec2:*::vpc-peering-connection/*" } ] @@ -138,7 +140,7 @@ __NOTE:__ Replace `` with the ID of the legacy account. Add the following trust policy to the IAM role: -__NOTE:__ Replace `` with the ID of the `identity` account in the new infrastructure. +**NOTE:** Replace `` with the ID of the `identity` account in the new infrastructure. ```json { @@ -147,26 +149,22 @@ __NOTE:__ Replace `` with the ID of the `identity` account { "Effect": "Allow", "Principal": { - "AWS": [ - "arn:aws:iam:::root" - ] + "AWS": ["arn:aws:iam:::root"] }, - "Action": [ - "sts:AssumeRole", - "sts:TagSession" - ], + "Action": ["sts:AssumeRole", "sts:TagSession"], "Condition": {} } ] } ``` -The trust policy allows the `identity` account to assume the role (and provision all the resources in the legacy account). +The trust policy allows the `identity` account to assume the role (and provision all the resources in the legacy +account). ## Provisioning -Provision the VPC peering connections in the `dev`, `prod`, `sandbox` and `staging` accounts by executing -the following commands: +Provision the VPC peering connections in the `dev`, `prod`, `sandbox` and `staging` accounts by executing the following +commands: ```sh atmos terraform plan vpc-peering -s ue1-sandbox @@ -182,6 +180,7 @@ atmos terraform plan vpc-peering -s ue1-prod atmos terraform apply vpc-peering -s ue1-prod ``` + ## Requirements @@ -201,7 +200,7 @@ atmos terraform apply vpc-peering -s ue1-prod | Name | Source | Version | |------|--------|---------| | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [requester\_vpc](#module\_requester\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.3.1 | +| [requester\_vpc](#module\_requester\_vpc) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | | [vpc\_peering](#module\_vpc\_peering) | cloudposse/vpc-peering-multi-account/aws | 0.19.1 | @@ -229,8 +228,6 @@ atmos terraform apply vpc-peering -s ue1-prod | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -240,7 +237,9 @@ atmos terraform apply vpc-peering -s ue1-prod | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | | [requester\_allow\_remote\_vpc\_dns\_resolution](#input\_requester\_allow\_remote\_vpc\_dns\_resolution) | Allow requester VPC to resolve public DNS hostnames to private IP addresses when queried from instances in the accepter VPC | `bool` | `true` | no | -| [requester\_vpc\_component\_name](#input\_requester\_vpc\_component\_name) | Requestor vpc component name | `string` | `"vpc"` | no | +| [requester\_role\_arn](#input\_requester\_role\_arn) | Requester AWS assume role ARN, if not provided it will be assumed to be the current terraform role. | `string` | `null` | no | +| [requester\_vpc\_component\_name](#input\_requester\_vpc\_component\_name) | Requester vpc component name | `string` | `"vpc"` | no | +| [requester\_vpc\_id](#input\_requester\_vpc\_id) | Requester VPC ID, if not provided, it will be looked up by component using variable `requester_vpc_component_name` | `string` | `null` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | @@ -251,7 +250,9 @@ atmos terraform apply vpc-peering -s ue1-prod |------|-------------| | [vpc\_peering](#output\_vpc\_peering) | VPC peering outputs | + -- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/vpc-peering) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/vpc-peering) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/vpc-peering/main.tf b/modules/vpc-peering/main.tf index 292dfe062..00bccce7c 100644 --- a/modules/vpc-peering/main.tf +++ b/modules/vpc-peering/main.tf @@ -1,7 +1,7 @@ locals { enabled = module.this.enabled - requester_vpc_id = module.requester_vpc.outputs.vpc_id + requester_vpc_id = coalesce(var.requester_vpc_id, one(module.requester_vpc[*].outputs.vpc_id)) accepter_aws_assume_role_arn = var.accepter_stage_name != null ? module.iam_roles.terraform_role_arns[var.accepter_stage_name] : var.accepter_aws_assume_role_arn } @@ -24,7 +24,7 @@ module "vpc_peering" { auto_accept = var.auto_accept requester_allow_remote_vpc_dns_resolution = var.requester_allow_remote_vpc_dns_resolution - requester_aws_assume_role_arn = module.iam_roles.terraform_role_arn + requester_aws_assume_role_arn = coalesce(var.requester_role_arn, module.iam_roles.terraform_role_arn) requester_region = var.region requester_vpc_id = local.requester_vpc_id diff --git a/modules/vpc-peering/provider-accepter.tf b/modules/vpc-peering/provider-accepter.tf new file mode 100644 index 000000000..281e7ed2e --- /dev/null +++ b/modules/vpc-peering/provider-accepter.tf @@ -0,0 +1,9 @@ +provider "aws" { + alias = "accepter" + + region = var.accepter_region + + assume_role { + role_arn = local.accepter_aws_assume_role_arn + } +} diff --git a/modules/vpc-peering/providers.tf b/modules/vpc-peering/providers.tf index 6c306aeb1..ef923e10a 100644 --- a/modules/vpc-peering/providers.tf +++ b/modules/vpc-peering/providers.tf @@ -1,38 +1,19 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. 
+ for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } -provider "aws" { - alias = "accepter" - - region = var.accepter_region - - assume_role { - role_arn = local.accepter_aws_assume_role_arn - } -} - module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/vpc-peering/remote-state.tf b/modules/vpc-peering/remote-state.tf index e0eae6f3b..41ed05d07 100644 --- a/modules/vpc-peering/remote-state.tf +++ b/modules/vpc-peering/remote-state.tf @@ -1,6 +1,8 @@ module "requester_vpc" { + count = var.requester_vpc_id == null ? 1 : 0 + source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.3.1" + version = "1.5.0" component = var.requester_vpc_component_name diff --git a/modules/vpc-peering/variables.tf b/modules/vpc-peering/variables.tf index b88a5383d..c7b43cd40 100644 --- a/modules/vpc-peering/variables.tf +++ b/modules/vpc-peering/variables.tf @@ -37,6 +37,18 @@ variable "accepter_stage_name" { default = null } +variable "requester_vpc_id" { + type = string + description = "Requester VPC ID, if not provided, it will be looked up by component using variable `requester_vpc_component_name`" + default = null +} + +variable "requester_role_arn" { + type = string + description = "Requester AWS assume role ARN, if not provided it will be assumed to be the current terraform role." + default = null +} + variable "requester_allow_remote_vpc_dns_resolution" { type = bool description = "Allow requester VPC to resolve public DNS hostnames to private IP addresses when queried from instances in the accepter VPC" @@ -45,6 +57,6 @@ variable "requester_allow_remote_vpc_dns_resolution" { variable "requester_vpc_component_name" { type = string - description = "Requestor vpc component name" + description = "Requester vpc component name" default = "vpc" } diff --git a/modules/vpc/README.md b/modules/vpc/README.md index 1e397ab68..cc9d93b7a 100644 --- a/modules/vpc/README.md +++ b/modules/vpc/README.md @@ -1,6 +1,15 @@ +--- +tags: + - component/vpc + - layer/network + - provider/aws +--- + # Component: `vpc` -This component is responsible for provisioning a VPC and corresponding Subnets. Additionally, VPC Flow Logs can optionally be enabled for auditing purposes. See the existing [VPC configuration](./vpc-configuration.md) documentation for the provisioned subnets. +This component is responsible for provisioning a VPC and corresponding Subnets. Additionally, VPC Flow Logs can +optionally be enabled for auditing purposes. See the existing VPC configuration documentation for the provisioned +subnets. ## Usage @@ -9,19 +18,50 @@ This component is responsible for provisioning a VPC and corresponding Subnets. Here's an example snippet for how to use this component. 
```yaml +# catalog/vpc/defaults or catalog/vpc components: terraform: - vpc: + vpc/defaults: + metadata: + type: abstract + component: vpc + settings: + spacelift: + workspace_enabled: true vars: enabled: true - subnet_type_tag_key: "example.net/subnet/type" + name: vpc + availability_zones: + - "a" + - "b" + - "c" + nat_gateway_enabled: true + nat_instance_enabled: false + max_subnet_count: 3 vpc_flow_logs_enabled: true vpc_flow_logs_bucket_environment_name: - vpc_flow_logs_bucket_stage_name: "audit" + vpc_flow_logs_bucket_stage_name: audit vpc_flow_logs_traffic_type: "ALL" + subnet_type_tag_key: "example.net/subnet/type" + assign_generated_ipv6_cidr_block: true +``` + +```yaml +import: + - catalog/vpc + +components: + terraform: + vpc: + metadata: + component: vpc + inherits: + - vpc/defaults + vars: ipv4_primary_cidr_block: "10.111.0.0/18" ``` + ## Requirements @@ -40,13 +80,14 @@ components: | Name | Source | Version | |------|--------|---------| -| [endpoint\_security\_groups](#module\_endpoint\_security\_groups) | cloudposse/security-group/aws | 2.0.0-rc1 | +| [endpoint\_security\_groups](#module\_endpoint\_security\_groups) | cloudposse/security-group/aws | 2.2.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | -| [subnets](#module\_subnets) | cloudposse/dynamic-subnets/aws | 2.0.4 | +| [subnets](#module\_subnets) | cloudposse/dynamic-subnets/aws | 2.4.2 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | -| [vpc](#module\_vpc) | cloudposse/vpc/aws | 2.0.0-rc1 | -| [vpc\_endpoints](#module\_vpc\_endpoints) | cloudposse/vpc/aws//modules/vpc-endpoints | 2.0.0-rc1 | -| [vpc\_flow\_logs\_bucket](#module\_vpc\_flow\_logs\_bucket) | cloudposse/stack-config/yaml//modules/remote-state | 1.0.0 | +| [utils](#module\_utils) | cloudposse/utils/aws | 1.3.0 | +| [vpc](#module\_vpc) | cloudposse/vpc/aws | 2.1.0 | +| [vpc\_endpoints](#module\_vpc\_endpoints) | cloudposse/vpc/aws//modules/vpc-endpoints | 2.1.0 | +| [vpc\_flow\_logs\_bucket](#module\_vpc\_flow\_logs\_bucket) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | ## Resources @@ -62,22 +103,23 @@ components: | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | +| [assign\_generated\_ipv6\_cidr\_block](#input\_assign\_generated\_ipv6\_cidr\_block) | When `true`, assign AWS generated IPv6 CIDR block to the VPC. Conflicts with `ipv6_ipam_pool_id`. | `bool` | `false` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | -| [availability\_zone\_ids](#input\_availability\_zone\_ids) | List of Availability Zones IDs where subnets will be created. Overrides `availability_zones`.
Useful in some regions when using only some AZs and you want to use the same ones across multiple accounts. | `list(string)` | `[]` | no | -| [availability\_zones](#input\_availability\_zones) | List of Availability Zones (AZs) where subnets will be created. Ignored when `availability_zone_ids` is set.
The order of zones in the list ***must be stable*** or else Terraform will continually make changes.
If no AZs are specified, then `max_subnet_count` AZs will be selected in alphabetical order.
If `max_subnet_count > 0` and `length(var.availability_zones) > max_subnet_count`, the list
will be truncated. We recommend setting `availability_zones` and `max_subnet_count` explicitly as constant
(not computed) values for predictability, consistency, and stability. | `list(string)` | `[]` | no | +| [availability\_zone\_ids](#input\_availability\_zone\_ids) | List of Availability Zones IDs where subnets will be created. Overrides `availability_zones`.
Can be the full name, e.g. `use1-az1`, or just the part after the AZ ID region code, e.g. `-az1`,
to allow reusable values across regions. Consider contention for resources and spot pricing in each AZ when selecting.
Useful in some regions when using only some AZs and you want to use the same ones across multiple accounts. | `list(string)` | `[]` | no | +| [availability\_zones](#input\_availability\_zones) | List of Availability Zones (AZs) where subnets will be created. Ignored when `availability_zone_ids` is set.
Can be the full name, e.g. `us-east-1a`, or just the part after the region, e.g. `a` to allow reusable values across regions.
The order of zones in the list ***must be stable*** or else Terraform will continually make changes.
If no AZs are specified, then `max_subnet_count` AZs will be selected in alphabetical order.
If `max_subnet_count > 0` and `length(var.availability_zones) > max_subnet_count`, the list
will be truncated. We recommend setting `availability_zones` and `max_subnet_count` explicitly as constant
(not computed) values for predictability, consistency, and stability. | `list(string)` | `[]` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | -| [eks\_tags\_enabled](#input\_eks\_tags\_enabled) | Whether or not to apply EKS-releated tags to resources | `bool` | `false` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [gateway\_vpc\_endpoints](#input\_gateway\_vpc\_endpoints) | A list of Gateway VPC Endpoints to provision into the VPC. Only valid values are "dynamodb" and "s3". | `set(string)` | `[]` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | | [interface\_vpc\_endpoints](#input\_interface\_vpc\_endpoints) | A list of Interface VPC Endpoints to provision into the VPC. | `set(string)` | `[]` | no | +| [ipv4\_additional\_cidr\_block\_associations](#input\_ipv4\_additional\_cidr\_block\_associations) | IPv4 CIDR blocks to assign to the VPC.
`ipv4_cidr_block` can be set explicitly, or set to `null` with the CIDR block derived from `ipv4_ipam_pool_id` using `ipv4_netmask_length`.
Map keys must be known at `plan` time, and are only used to track changes. |
map(object({
ipv4_cidr_block = string
ipv4_ipam_pool_id = string
ipv4_netmask_length = number
}))
| `{}` | no | +| [ipv4\_cidr\_block\_association\_timeouts](#input\_ipv4\_cidr\_block\_association\_timeouts) | Timeouts (in `go` duration format) for creating and destroying IPv4 CIDR block associations |
object({
create = string
delete = string
})
| `null` | no | | [ipv4\_cidrs](#input\_ipv4\_cidrs) | Lists of CIDRs to assign to subnets. Order of CIDRs in the lists must not change over time.
Lists may contain more CIDRs than needed. |
list(object({
private = list(string)
public = list(string)
}))
| `[]` | no | | [ipv4\_primary\_cidr\_block](#input\_ipv4\_primary\_cidr\_block) | The primary IPv4 CIDR block for the VPC.
Either `ipv4_primary_cidr_block` or `ipv4_primary_cidr_block_association` must be set, but not both. | `string` | `null` | no | +| [ipv4\_primary\_cidr\_block\_association](#input\_ipv4\_primary\_cidr\_block\_association) | Configuration of the VPC's primary IPv4 CIDR block via IPAM. Conflicts with `ipv4_primary_cidr_block`.
One of `ipv4_primary_cidr_block` or `ipv4_primary_cidr_block_association` must be set.
Additional CIDR blocks can be set via `ipv4_additional_cidr_block_associations`. |
object({
ipv4_ipam_pool_id = string
ipv4_netmask_length = number
})
| `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | @@ -88,6 +130,7 @@ components: | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | | [nat\_eip\_aws\_shield\_protection\_enabled](#input\_nat\_eip\_aws\_shield\_protection\_enabled) | Enable or disable AWS Shield Advanced protection for NAT EIPs. If set to 'true', a subscription to AWS Shield Advanced must exist in this account. | `bool` | `false` | no | | [nat\_gateway\_enabled](#input\_nat\_gateway\_enabled) | Flag to enable/disable NAT gateways | `bool` | `true` | no | +| [nat\_instance\_ami\_id](#input\_nat\_instance\_ami\_id) | A list optionally containing the ID of the AMI to use for the NAT instance.
If the list is empty (the default), the latest official AWS NAT instance AMI
will be used. NOTE: The Official NAT instance AMI is being phased out and
does not support NAT64. Use of a NAT gateway is recommended instead. | `list(string)` | `[]` | no | | [nat\_instance\_enabled](#input\_nat\_instance\_enabled) | Flag to enable/disable NAT instances | `bool` | `false` | no | | [nat\_instance\_type](#input\_nat\_instance\_type) | NAT Instance type | `string` | `"t3.micro"` | no | | [public\_subnets\_enabled](#input\_public\_subnets\_enabled) | If false, do not create public subnets.
Since NAT gateways and instances must be created in public subnets, these will also not be created when `false`. | `bool` | `true` | no | @@ -95,6 +138,8 @@ components: | [region](#input\_region) | AWS Region | `string` | n/a | yes | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [subnet\_type\_tag\_key](#input\_subnet\_type\_tag\_key) | Key for subnet type tag to provide information about the type of subnets, e.g. `cpco/subnet/type=private` or `cpcp/subnet/type=public` | `string` | n/a | yes | +| [subnets\_per\_az\_count](#input\_subnets\_per\_az\_count) | The number of subnet of each type (public or private) to provision per Availability Zone. | `number` | `1` | no | +| [subnets\_per\_az\_names](#input\_subnets\_per\_az\_names) | The subnet names of each type (public or private) to provision per Availability Zone.
This variable is optional.
If a list of names is provided, the list items will be used as keys in the outputs `named_private_subnets_map`, `named_public_subnets_map`,
`named_private_route_table_ids_map` and `named_public_route_table_ids_map` | `list(string)` |
[
"common"
]
| no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | | [vpc\_flow\_logs\_bucket\_environment\_name](#input\_vpc\_flow\_logs\_bucket\_environment\_name) | The name of the environment where the VPC Flow Logs bucket is provisioned | `string` | `""` | no | @@ -109,6 +154,8 @@ components: | Name | Description | |------|-------------| | [availability\_zones](#output\_availability\_zones) | List of Availability Zones where subnets were created | +| [az\_private\_subnets\_map](#output\_az\_private\_subnets\_map) | Map of AZ names to list of private subnet IDs in the AZs | +| [az\_public\_subnets\_map](#output\_az\_public\_subnets\_map) | Map of AZ names to list of public subnet IDs in the AZs | | [interface\_vpc\_endpoints](#output\_interface\_vpc\_endpoints) | List of Interface VPC Endpoints in this VPC. | | [max\_subnet\_count](#output\_max\_subnet\_count) | Maximum allowed number of subnets before all subnet CIDRs need to be recomputed | | [nat\_eip\_protections](#output\_nat\_eip\_protections) | List of AWS Shield Advanced Protections for NAT Elastic IPs. | @@ -129,9 +176,11 @@ components: | [vpc\_default\_security\_group\_id](#output\_vpc\_default\_security\_group\_id) | The ID of the security group created by default on VPC creation | | [vpc\_id](#output\_vpc\_id) | VPC ID | + ## References -- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/vpc) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/vpc) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/vpc/main.tf b/modules/vpc/main.tf index 69153d214..ea9e41735 100644 --- a/modules/vpc/main.tf +++ b/modules/vpc/main.tf @@ -13,6 +13,24 @@ locals { ) ) + availability_zones = length(var.availability_zones) > 0 ? ( + (substr( + var.availability_zones[0], + 0, + length(var.region) + ) == var.region) ? var.availability_zones : formatlist("${var.region}%s", var.availability_zones) + ) : var.availability_zones + + short_region = module.utils.region_az_alt_code_maps["to_short"][var.region] + + availability_zone_ids = length(var.availability_zone_ids) > 0 ? ( + (substr( + var.availability_zone_ids[0], + 0, + length(local.short_region) + ) == local.short_region) ? 
var.availability_zone_ids : formatlist("${local.short_region}%s", var.availability_zone_ids) + ) : var.availability_zone_ids + # required tags to make ALB ingress work https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html # https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html public_subnets_additional_tags = { @@ -47,13 +65,22 @@ locals { } } } +module "utils" { + source = "cloudposse/utils/aws" + version = "1.3.0" +} + module "vpc" { source = "cloudposse/vpc/aws" - version = "2.0.0-rc1" + version = "2.1.0" ipv4_primary_cidr_block = var.ipv4_primary_cidr_block internet_gateway_enabled = var.public_subnets_enabled - assign_generated_ipv6_cidr_block = false # disable IPv6 + assign_generated_ipv6_cidr_block = var.assign_generated_ipv6_cidr_block + + ipv4_primary_cidr_block_association = var.ipv4_primary_cidr_block_association + ipv4_additional_cidr_block_associations = var.ipv4_additional_cidr_block_associations + ipv4_cidr_block_association_timeouts = var.ipv4_cidr_block_association_timeouts # Required for DNS resolution of VPC Endpoint interfaces, and generally harmless # See https://docs.aws.amazon.com/vpc/latest/userguide/vpc-dns.html#vpc-dns-support @@ -72,7 +99,7 @@ module "endpoint_security_groups" { for_each = local.enabled && try(length(var.interface_vpc_endpoints), 0) > 0 ? toset([local.interface_endpoint_security_group_key]) : [] source = "cloudposse/security-group/aws" - version = "2.0.0-rc1" + version = "2.2.0" create_before_destroy = true preserve_security_group_id = false @@ -97,12 +124,11 @@ module "endpoint_security_groups" { context = module.this.context } - module "vpc_endpoints" { source = "cloudposse/vpc/aws//modules/vpc-endpoints" - version = "2.0.0-rc1" + version = "2.1.0" - enabled = (length(var.interface_vpc_endpoints) + length(var.gateway_vpc_endpoints)) > 0 + enabled = local.enabled && (length(var.interface_vpc_endpoints) + length(var.gateway_vpc_endpoints)) > 0 vpc_id = module.vpc.vpc_id gateway_vpc_endpoints = local.gateway_endpoint_map @@ -113,10 +139,10 @@ module "vpc_endpoints" { module "subnets" { source = "cloudposse/dynamic-subnets/aws" - version = "2.0.4" + version = "2.4.2" - availability_zones = var.availability_zones - availability_zone_ids = var.availability_zone_ids + availability_zones = local.availability_zones + availability_zone_ids = local.availability_zone_ids ipv4_cidr_block = [module.vpc.vpc_cidr_block] ipv4_cidrs = var.ipv4_cidrs ipv6_enabled = false @@ -126,10 +152,13 @@ module "subnets" { nat_gateway_enabled = var.nat_gateway_enabled nat_instance_enabled = var.nat_instance_enabled nat_instance_type = var.nat_instance_type + nat_instance_ami_id = var.nat_instance_ami_id public_subnets_enabled = var.public_subnets_enabled public_subnets_additional_tags = local.public_subnets_additional_tags private_subnets_additional_tags = local.private_subnets_additional_tags vpc_id = module.vpc.vpc_id + subnets_per_az_count = var.subnets_per_az_count + subnets_per_az_names = var.subnets_per_az_names context = module.this.context } diff --git a/modules/vpc/outputs.tf b/modules/vpc/outputs.tf index 7459d8ea4..7e8ddd273 100644 --- a/modules/vpc/outputs.tf +++ b/modules/vpc/outputs.tf @@ -118,3 +118,13 @@ output "availability_zones" { description = "List of Availability Zones where subnets were created" value = module.subnets.availability_zones } + +output "az_private_subnets_map" { + description = "Map of AZ names to list of private subnet IDs in the AZs" + value = module.subnets.az_private_subnets_map +} + +output 
"az_public_subnets_map" { + description = "Map of AZ names to list of public subnet IDs in the AZs" + value = module.subnets.az_public_subnets_map +} diff --git a/modules/vpc/providers.tf b/modules/vpc/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/vpc/providers.tf +++ b/modules/vpc/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/vpc/remote-state.tf b/modules/vpc/remote-state.tf index 14ea76b3e..ac5ef9f92 100644 --- a/modules/vpc/remote-state.tf +++ b/modules/vpc/remote-state.tf @@ -1,15 +1,13 @@ - - module "vpc_flow_logs_bucket" { - count = var.vpc_flow_logs_enabled ? 1 : 0 + count = local.vpc_flow_logs_enabled ? 1 : 0 source = "cloudposse/stack-config/yaml//modules/remote-state" - version = "1.0.0" + version = "1.5.0" component = "vpc-flow-logs-bucket" environment = var.vpc_flow_logs_bucket_environment_name stage = var.vpc_flow_logs_bucket_stage_name - tenant = coalesce(var.vpc_flow_logs_bucket_tenant_name, module.this.tenant) + tenant = try(coalesce(var.vpc_flow_logs_bucket_tenant_name, module.this.tenant), null) context = module.this.context } diff --git a/modules/vpc/variables.tf b/modules/vpc/variables.tf index d63f6c29d..6e9940acb 100644 --- a/modules/vpc/variables.tf +++ b/modules/vpc/variables.tf @@ -7,6 +7,7 @@ variable "availability_zones" { type = list(string) description = <<-EOT List of Availability Zones (AZs) where subnets will be created. Ignored when `availability_zone_ids` is set. + Can be the full name, e.g. `us-east-1a`, or just the part after the region, e.g. `a` to allow reusable values across regions. The order of zones in the list ***must be stable*** or else Terraform will continually make changes. If no AZs are specified, then `max_subnet_count` AZs will be selected in alphabetical order. If `max_subnet_count > 0` and `length(var.availability_zones) > max_subnet_count`, the list @@ -20,6 +21,8 @@ variable "availability_zone_ids" { type = list(string) description = <<-EOT List of Availability Zones IDs where subnets will be created. Overrides `availability_zones`. + Can be the full name, e.g. `use1-az1`, or just the part after the AZ ID region code, e.g. `-az1`, + to allow reusable values across regions. Consider contention for resources and spot pricing in each AZ when selecting. Useful in some regions when using only some AZs and you want to use the same ones across multiple accounts. 
EOT default = [] @@ -34,6 +37,42 @@ variable "ipv4_primary_cidr_block" { default = null } +variable "ipv4_primary_cidr_block_association" { + type = object({ + ipv4_ipam_pool_id = string + ipv4_netmask_length = number + }) + description = <<-EOT + Configuration of the VPC's primary IPv4 CIDR block via IPAM. Conflicts with `ipv4_primary_cidr_block`. + One of `ipv4_primary_cidr_block` or `ipv4_primary_cidr_block_association` must be set. + Additional CIDR blocks can be set via `ipv4_additional_cidr_block_associations`. + EOT + default = null +} + +variable "ipv4_additional_cidr_block_associations" { + type = map(object({ + ipv4_cidr_block = string + ipv4_ipam_pool_id = string + ipv4_netmask_length = number + })) + description = <<-EOT + IPv4 CIDR blocks to assign to the VPC. + `ipv4_cidr_block` can be set explicitly, or set to `null` with the CIDR block derived from `ipv4_ipam_pool_id` using `ipv4_netmask_length`. + Map keys must be known at `plan` time, and are only used to track changes. + EOT + default = {} +} + +variable "ipv4_cidr_block_association_timeouts" { + type = object({ + create = string + delete = string + }) + description = "Timeouts (in `go` duration format) for creating and destroying IPv4 CIDR block associations" + default = null +} + variable "ipv4_cidrs" { type = list(object({ private = list(string) @@ -50,6 +89,12 @@ variable "ipv4_cidrs" { } } +variable "assign_generated_ipv6_cidr_block" { + type = bool + description = "When `true`, assign AWS generated IPv6 CIDR block to the VPC. Conflicts with `ipv6_ipam_pool_id`." + default = false +} + variable "public_subnets_enabled" { type = bool description = <<-EOT @@ -77,6 +122,17 @@ variable "nat_instance_type" { default = "t3.micro" } +variable "nat_instance_ami_id" { + type = list(string) + description = <<-EOT + A list optionally containing the ID of the AMI to use for the NAT instance. + If the list is empty (the default), the latest official AWS NAT instance AMI + will be used. NOTE: The Official NAT instance AMI is being phased out and + does not support NAT64. Use of a NAT gateway is recommended instead. + EOT + default = [] +} + variable "map_public_ip_on_launch" { type = bool default = true @@ -140,12 +196,6 @@ variable "nat_eip_aws_shield_protection_enabled" { default = false } -variable "eks_tags_enabled" { - type = bool - description = "Whether or not to apply EKS-releated tags to resources" - default = false -} - variable "gateway_vpc_endpoints" { type = set(string) description = "A list of Gateway VPC Endpoints to provision into the VPC. Only valid values are \"dynamodb\" and \"s3\"." @@ -157,3 +207,30 @@ variable "interface_vpc_endpoints" { description = "A list of Interface VPC Endpoints to provision into the VPC." default = [] } + +variable "subnets_per_az_count" { + type = number + description = <<-EOT + The number of subnet of each type (public or private) to provision per Availability Zone. + EOT + default = 1 + nullable = false + validation { + condition = var.subnets_per_az_count > 0 + # Validation error messages must be on a single line, among other restrictions. + # See https://github.com/hashicorp/terraform/issues/24123 + error_message = "The `subnets_per_az` value must be greater than 0." + } +} + +variable "subnets_per_az_names" { + type = list(string) + description = <<-EOT + The subnet names of each type (public or private) to provision per Availability Zone. + This variable is optional. 
+ If a list of names is provided, the list items will be used as keys in the outputs `named_private_subnets_map`, `named_public_subnets_map`, + `named_private_route_table_ids_map` and `named_public_route_table_ids_map` + EOT + default = ["common"] + nullable = false +} diff --git a/modules/waf/README.md b/modules/waf/README.md index ca37fa944..71b523357 100644 --- a/modules/waf/README.md +++ b/modules/waf/README.md @@ -1,7 +1,14 @@ -# Component: `aws-waf-acl` +--- +tags: + - component/waf + - layer/addons + - provider/aws +--- -This component is responsible for provisioning an AWS Web Application Firewall (WAF) with an associated managed rule group. +# Component: `waf` +This component is responsible for provisioning an AWS Web Application Firewall (WAF) with an associated managed rule +group. ## Usage @@ -12,49 +19,56 @@ Here's an example snippet for how to use this component. ```yaml components: terraform: - aws-waf-acl: + waf: vars: enabled: true name: waf acl_name: default default_action: allow description: Default web ACL + visibility_config: + cloudwatch_metrics_enabled: false + metric_name: "default" + sampled_requests_enabled: false managed_rule_group_statement_rules: - - name: "OWASP-10" - # Rules are processed in order based on the value of priority, lowest number first - priority: 1 - - statement: - name: AWSManagedRulesCommonRuleSet - vendor_name: AWS - - visibility_config: - # Defines and enables Amazon CloudWatch metrics and web request sample collection. - cloudwatch_metrics_enabled: false - metric_name: "OWASP-10" - sampled_requests_enabled: false + - name: "OWASP-10" + # Rules are processed in order based on the value of priority, lowest number first + priority: 1 + + statement: + name: AWSManagedRulesCommonRuleSet + vendor_name: AWS + + visibility_config: + # Defines and enables Amazon CloudWatch metrics and web request sample collection. 
+ cloudwatch_metrics_enabled: false + metric_name: "OWASP-10" + sampled_requests_enabled: false ``` + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | ~> 4.0 | +| [terraform](#requirement\_terraform) | >= 1.3.0 | +| [aws](#requirement\_aws) | >= 5.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | ~> 4.0 | +| [aws](#provider\_aws) | >= 5.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [aws\_waf](#module\_aws\_waf) | cloudposse/waf/aws | 0.0.4 | +| [association\_resource\_components](#module\_association\_resource\_components) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | +| [aws\_waf](#module\_aws\_waf) | cloudposse/waf/aws | 1.8.0 | | [iam\_roles](#module\_iam\_roles) | ../account-map/modules/iam-roles | n/a | +| [log\_destination\_components](#module\_log\_destination\_components) | cloudposse/stack-config/yaml//modules/remote-state | 1.5.0 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 | ## Resources @@ -62,6 +76,8 @@ components: | Name | Type | |------|------| | [aws_ssm_parameter.acl_arn](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource | +| [aws_alb.alb](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/alb) | data source | +| [aws_lbs.alb_by_tags](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/lbs) | data source | ## Inputs @@ -69,55 +85,66 @@ components: |------|-------------|------|---------|:--------:| | [acl\_name](#input\_acl\_name) | Friendly name of the ACL. The ACL ARN will be stored in SSM under {ssm\_path\_prefix}/{acl\_name}/arn | `string` | n/a | yes | | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no | -| [association\_resource\_arns](#input\_association\_resource\_arns) | A list of ARNs of the resources to associate with the web ACL.
This must be an ARN of an Application Load Balancer or an Amazon API Gateway stage. | `list(string)` | `[]` | no | +| [alb\_names](#input\_alb\_names) | List of ALB names to associate with the web ACL. | `list(string)` | `[]` | no | +| [alb\_tags](#input\_alb\_tags) | List of tags to match one or more ALBs to associate with the web ACL. | `list(map(string))` | `[]` | no | +| [association\_resource\_arns](#input\_association\_resource\_arns) | A list of ARNs of the resources to associate with the web ACL.
This must be an ARN of an Application Load Balancer, Amazon API Gateway stage, or AWS AppSync.

Do not use this variable to associate a CloudFront distribution.
Instead, you should use the `web_acl_id` property on the `cloudfront_distribution` resource.
For more details, refer to https://docs.aws.amazon.com/waf/latest/APIReference/API_AssociateWebACL.html | `list(string)` | `[]` | no | +| [association\_resource\_component\_selectors](#input\_association\_resource\_component\_selectors) | A list of Atmos component selectors to get from the remote state and associate their ARNs with the web ACL.
The components must be Application Load Balancers, Amazon API Gateway stages, or AWS AppSync.

component:
Atmos component name
component\_arn\_output:
The component output that defines the component ARN

Set `tenant`, `environment` and `stage` if the components are in different OUs, regions or accounts.

Do not use this variable to select a CloudFront distribution component.
Instead, you should use the `web_acl_id` property on the `cloudfront_distribution` resource.
For more details, refer to https://docs.aws.amazon.com/waf/latest/APIReference/API_AssociateWebACL.html |
list(object({
component = string
namespace = optional(string, null)
tenant = optional(string, null)
environment = optional(string, null)
stage = optional(string, null)
component_arn_output = string
}))
| `[]` | no | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no | -| [byte\_match\_statement\_rules](#input\_byte\_match\_statement\_rules) | A rule statement that defines a string match search for AWS WAF to apply to web requests.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

statement:
field\_to\_match:
The part of a web request that you want AWS WAF to inspect.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#field-to-match
text\_transformation:
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#text-transformation

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. | `list(any)` | `null` | no | +| [byte\_match\_statement\_rules](#input\_byte\_match\_statement\_rules) | A rule statement that defines a string match search for AWS WAF to apply to web requests.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

captcha\_config:
Specifies how AWS WAF should handle CAPTCHA evaluations.

immunity\_time\_property:
Defines custom immunity time.

immunity\_time:
The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300.

rule\_label:
A list of labels to apply to web requests that match the rule match statement.

statement:
positional\_constraint:
Area within the portion of a web request that you want AWS WAF to search for search\_string. Valid values include the following: EXACTLY, STARTS\_WITH, ENDS\_WITH, CONTAINS, CONTAINS\_WORD.
search\_string:
String value that you want AWS WAF to search for. AWS WAF searches only in the part of web requests that you designate for inspection in field\_to\_match.
field\_to\_match:
The part of a web request that you want AWS WAF to inspect.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#field-to-match
text\_transformation:
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#text-transformation

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. |
list(object({
name = string
priority = number
action = string
captcha_config = optional(object({
immunity_time_property = object({
immunity_time = number
})
}), null)
rule_label = optional(list(string), null)
statement = any
visibility_config = optional(object({
cloudwatch_metrics_enabled = optional(bool)
metric_name = string
sampled_requests_enabled = optional(bool)
}), null)
}))
| `null` | no | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` |
{
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no | +| [custom\_response\_body](#input\_custom\_response\_body) | Defines custom response bodies that can be referenced by custom\_response actions.
The map keys are used as the `key` attribute which is a unique key identifying the custom response body.
content:
Payload of the custom response.
The response body can be plain text, HTML or JSON and cannot exceed 4KB in size.
content\_type:
Content Type of Response Body.
Valid values are `TEXT_PLAIN`, `TEXT_HTML`, or `APPLICATION_JSON`. |
map(object({
content = string
content_type = string
}))
| `{}` | no | | [default\_action](#input\_default\_action) | Specifies that AWS WAF should allow requests by default. Possible values: `allow`, `block`. | `string` | `"block"` | no | +| [default\_block\_response](#input\_default\_block\_response) | A HTTP response code that is sent when default action is used. Only takes effect if default\_action is set to `block`. | `string` | `null` | no | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no | | [description](#input\_description) | A friendly description of the WebACL. | `string` | `"Managed by Terraform"` | no | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | -| [geo\_match\_statement\_rules](#input\_geo\_match\_statement\_rules) | A rule statement used to identify web requests based on country of origin.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

statement:
country\_codes:
A list of two-character country codes.
forwarded\_ip\_config:
fallback\_behavior:
The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.
Possible values: `MATCH`, `NO_MATCH`
header\_name:
The name of the HTTP header to use for the IP address.

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. | `list(any)` | `null` | no | +| [geo\_allowlist\_statement\_rules](#input\_geo\_allowlist\_statement\_rules) | A rule statement used to identify a list of allowed countries which should not be blocked by the WAF.

name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

captcha\_config:
Specifies how AWS WAF should handle CAPTCHA evaluations.

immunity\_time\_property:
Defines custom immunity time.

immunity\_time:
The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300.

rule\_label:
A list of labels to apply to web requests that match the rule match statement.

statement:
country\_codes:
A list of two-character country codes.
forwarded\_ip\_config:
fallback\_behavior:
The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.
Possible values: `MATCH`, `NO_MATCH`
header\_name:
The name of the HTTP header to use for the IP address.

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. |
list(object({
name = string
priority = number
action = string
captcha_config = optional(object({
immunity_time_property = object({
immunity_time = number
})
}), null)
rule_label = optional(list(string), null)
statement = any
visibility_config = optional(object({
cloudwatch_metrics_enabled = optional(bool)
metric_name = string
sampled_requests_enabled = optional(bool)
}), null)
}))
| `null` | no | +| [geo\_match\_statement\_rules](#input\_geo\_match\_statement\_rules) | A rule statement used to identify web requests based on country of origin.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

captcha\_config:
Specifies how AWS WAF should handle CAPTCHA evaluations.

immunity\_time\_property:
Defines custom immunity time.

immunity\_time:
The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300.

rule\_label:
A list of labels to apply to web requests that match the rule match statement.

statement:
country\_codes:
A list of two-character country codes.
forwarded\_ip\_config:
fallback\_behavior:
The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.
Possible values: `MATCH`, `NO_MATCH`
header\_name:
The name of the HTTP header to use for the IP address.

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. |
list(object({
name = string
priority = number
action = string
captcha_config = optional(object({
immunity_time_property = object({
immunity_time = number
})
}), null)
rule_label = optional(list(string), null)
statement = any
visibility_config = optional(object({
cloudwatch_metrics_enabled = optional(bool)
metric_name = string
sampled_requests_enabled = optional(bool)
}), null)
}))
| `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_profile\_name](#input\_import\_profile\_name) | AWS Profile name to use when importing a resource | `string` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | -| [ip\_set\_reference\_statement\_rules](#input\_ip\_set\_reference\_statement\_rules) | A rule statement used to detect web requests coming from particular IP addresses or address ranges.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

statement:
arn:
The ARN of the IP Set that this statement references.
ip\_set\_forwarded\_ip\_config:
fallback\_behavior:
The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.
Possible values: `MATCH`, `NO_MATCH`
header\_name:
The name of the HTTP header to use for the IP address.
position:
The position in the header to search for the IP address.
Possible values include: `FIRST`, `LAST`, or `ANY`.

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. | `list(any)` | `null` | no | +| [ip\_set\_reference\_statement\_rules](#input\_ip\_set\_reference\_statement\_rules) | A rule statement used to detect web requests coming from particular IP addresses or address ranges.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

captcha\_config:
Specifies how AWS WAF should handle CAPTCHA evaluations.

immunity\_time\_property:
Defines custom immunity time.

immunity\_time:
The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300.

rule\_label:
A list of labels to apply to web requests that match the rule match statement.

statement:
arn:
The ARN of the IP Set that this statement references.
ip\_set:
Defines a new IP Set

description:
A friendly description of the IP Set
addresses:
Contains an array of strings that specifies zero or more IP addresses or blocks of IP addresses.
All addresses must be specified using Classless Inter-Domain Routing (CIDR) notation.
ip\_address\_version:
Specify `IPV4` or `IPV6`
ip\_set\_forwarded\_ip\_config:
fallback\_behavior:
The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.
Possible values: `MATCH`, `NO_MATCH`
header\_name:
The name of the HTTP header to use for the IP address.
position:
The position in the header to search for the IP address.
Possible values include: `FIRST`, `LAST`, or `ANY`.

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. |
list(object({
name = string
priority = number
action = string
captcha_config = optional(object({
immunity_time_property = object({
immunity_time = number
})
}), null)
rule_label = optional(list(string), null)
statement = any
visibility_config = optional(object({
cloudwatch_metrics_enabled = optional(bool)
metric_name = string
sampled_requests_enabled = optional(bool)
}), null)
}))
| `null` | no | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no | | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | -| [log\_destination\_configs](#input\_log\_destination\_configs) | The Amazon Kinesis Data Firehose ARNs. | `list(string)` | `[]` | no | -| [managed\_rule\_group\_statement\_rules](#input\_managed\_rule\_group\_statement\_rules) | A rule statement used to run the rules that are defined in a managed rule group.

name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

override\_action:
The override action to apply to the rules in a rule group.
Possible values: `count`, `none`

statement:
name:
The name of the managed rule group.
vendor\_name:
The name of the managed rule group vendor.
excluded\_rule:
The list of names of the rules to exclude.

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. | `list(any)` | `null` | no | +| [log\_destination\_component\_selectors](#input\_log\_destination\_component\_selectors) | A list of Atmos component selectors to get from the remote state and associate their names/ARNs with the WAF logs.
The components must be Amazon Kinesis Data Firehose, CloudWatch Log Group, or S3 bucket.

component:
Atmos component name
component\_output:
The component output that defines the component name or ARN

Set `tenant`, `environment` and `stage` if the components are in different OUs, regions or accounts.

Note: data firehose, log group, or bucket name must be prefixed with `aws-waf-logs-`,
e.g. `aws-waf-logs-example-firehose`, `aws-waf-logs-example-log-group`, or `aws-waf-logs-example-bucket`. |
list(object({
component = string
namespace = optional(string, null)
tenant = optional(string, null)
environment = optional(string, null)
stage = optional(string, null)
component_output = string
}))
| `[]` | no | +| [log\_destination\_configs](#input\_log\_destination\_configs) | A list of resource names/ARNs to associate Amazon Kinesis Data Firehose, Cloudwatch Log log group, or S3 bucket with the WAF logs.
Note: data firehose, log group, or bucket name must be prefixed with `aws-waf-logs-`,
e.g. `aws-waf-logs-example-firehose`, `aws-waf-logs-example-log-group`, or `aws-waf-logs-example-bucket`. | `list(string)` | `[]` | no | +| [logging\_filter](#input\_logging\_filter) | A configuration block that specifies which web requests are kept in the logs and which are dropped.
You can filter on the rule action and on the web request labels that were applied by matching rules during web ACL evaluation. |
object({
default_behavior = string
filter = list(object({
behavior = string
requirement = string
condition = list(object({
action_condition = optional(object({
action = string
}), null)
label_name_condition = optional(object({
label_name = string
}), null)
}))
}))
})
| `null` | no | +| [managed\_rule\_group\_statement\_rules](#input\_managed\_rule\_group\_statement\_rules) | A rule statement used to run the rules that are defined in a managed rule group.

name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

override\_action:
The override action to apply to the rules in a rule group.
Possible values: `count`, `none`

captcha\_config:
Specifies how AWS WAF should handle CAPTCHA evaluations.

immunity\_time\_property:
Defines custom immunity time.

immunity\_time:
The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300.

rule\_label:
A list of labels to apply to web requests that match the rule match statement.

statement:
name:
The name of the managed rule group.
vendor\_name:
The name of the managed rule group vendor.
version:
The version of the managed rule group.
You can set `Version_1.0` or `Version_1.1` etc. If you want to use the default version, do not set anything.
rule\_action\_override:
Action settings to use in the place of the rule actions that are configured inside the rule group.
You specify one override for each rule whose action you want to change.
managed\_rule\_group\_configs:
Additional information that's used by a managed rule group. Only one rule attribute is allowed in each config.
Refer to https://docs.aws.amazon.com/waf/latest/developerguide/aws-managed-rule-groups-list.html for more details.

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. |
list(object({
name = string
priority = number
override_action = optional(string)
captcha_config = optional(object({
immunity_time_property = object({
immunity_time = number
})
}), null)
rule_label = optional(list(string), null)
statement = object({
name = string
vendor_name = string
version = optional(string)
rule_action_override = optional(map(object({
action = string
custom_request_handling = optional(object({
insert_header = object({
name = string
value = string
})
}), null)
custom_response = optional(object({
response_code = string
response_header = optional(object({
name = string
value = string
}), null)
}), null)
})), null)
managed_rule_group_configs = optional(list(object({
aws_managed_rules_bot_control_rule_set = optional(object({
inspection_level = string
enable_machine_learning = optional(bool, true)
}), null)
aws_managed_rules_atp_rule_set = optional(object({
enable_regex_in_path = optional(bool)
login_path = string
request_inspection = optional(object({
payload_type = string
password_field = object({
identifier = string
})
username_field = object({
identifier = string
})
}), null)
response_inspection = optional(object({
body_contains = optional(object({
success_strings = list(string)
failure_strings = list(string)
}), null)
header = optional(object({
name = string
success_values = list(string)
failure_values = list(string)
}), null)
json = optional(object({

identifier = string
success_strings = list(string)
failure_strings = list(string)
}), null)
status_code = optional(object({
success_codes = list(string)
failure_codes = list(string)
}), null)
}), null)
}), null)
})), null)
})
visibility_config = optional(object({
cloudwatch_metrics_enabled = optional(bool)
metric_name = string
sampled_requests_enabled = optional(bool)
}), null)
}))
| `null` | no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | -| [rate\_based\_statement\_rules](#input\_rate\_based\_statement\_rules) | A rate-based rule tracks the rate of requests for each originating IP address,
and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

statement:
aggregate\_key\_type:
Setting that indicates how to aggregate the request counts.
Possible values include: `FORWARDED_IP` or `IP`
limit:
The limit on requests per 5-minute period for a single originating IP address.
forwarded\_ip\_config:
fallback\_behavior:
The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.
Possible values: `MATCH`, `NO_MATCH`
header\_name:
The name of the HTTP header to use for the IP address.

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. | `list(any)` | `null` | no | -| [redacted\_fields](#input\_redacted\_fields) | The parts of the request that you want to keep out of the logs.
method\_enabled:
Whether to enable redaction of the HTTP method.
The method indicates the type of operation that the request is asking the origin to perform.
uri\_path\_enabled:
Whether to enable redaction of the URI path.
This is the part of a web request that identifies a resource.
query\_string\_enabled:
Whether to enable redaction of the query string.
This is the part of a URL that appears after a `?` character, if any.
single\_header:
The list of names of the query headers to redact. | `map(any)` | `{}` | no | -| [regex\_pattern\_set\_reference\_statement\_rules](#input\_regex\_pattern\_set\_reference\_statement\_rules) | A rule statement used to search web request components for matches with regular expressions.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

statement:
arn:
The Amazon Resource Name (ARN) of the Regex Pattern Set that this statement references.
field\_to\_match:
The part of a web request that you want AWS WAF to inspect.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#field-to-match
text\_transformation:
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#text-transformation

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. | `list(any)` | `null` | no | +| [rate\_based\_statement\_rules](#input\_rate\_based\_statement\_rules) | A rate-based rule tracks the rate of requests for each originating IP address,
and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

captcha\_config:
Specifies how AWS WAF should handle CAPTCHA evaluations.

immunity\_time\_property:
Defines custom immunity time.

immunity\_time:
The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300.

rule\_label:
A list of labels to apply to web requests that match the rule match statement.

statement:
aggregate\_key\_type:
Setting that indicates how to aggregate the request counts.
Possible values include: `FORWARDED_IP` or `IP`
limit:
The limit on requests per 5-minute period for a single originating IP address.
evaluation\_window\_sec:
The amount of time, in seconds, that AWS WAF should include in its request counts, looking back from the current time.
Valid values are 60, 120, 300, and 600. Defaults to 300 (5 minutes).
forwarded\_ip\_config:
fallback\_behavior:
The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.
Possible values: `MATCH`, `NO_MATCH`
header\_name:
The name of the HTTP header to use for the IP address.
byte\_match\_statement:
field\_to\_match:
Part of a web request that you want AWS WAF to inspect.
positional\_constraint:
Area within the portion of a web request that you want AWS WAF to search for search\_string.
Valid values include the following: `EXACTLY`, `STARTS_WITH`, `ENDS_WITH`, `CONTAINS`, `CONTAINS_WORD`.
search\_string:
String value that you want AWS WAF to search for.
AWS WAF searches only in the part of web requests that you designate for inspection in `field_to_match`.
The maximum length of the value is 50 bytes.
text\_transformation:
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#text-transformation

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. |
list(object({
name = string
priority = number
action = string
captcha_config = optional(object({
immunity_time_property = object({
immunity_time = number
})
}), null)
rule_label = optional(list(string), null)
statement = object({
limit = number
aggregate_key_type = string
evaluation_window_sec = optional(number)
forwarded_ip_config = optional(object({
fallback_behavior = string
header_name = string
}), null)
scope_down_statement = optional(object({
byte_match_statement = object({
positional_constraint = string
search_string = string
field_to_match = object({
all_query_arguments = optional(bool)
body = optional(bool)
method = optional(bool)
query_string = optional(bool)
single_header = optional(object({ name = string }))
single_query_argument = optional(object({ name = string }))
uri_path = optional(bool)
})
text_transformation = list(object({
priority = number
type = string
}))
})
}), null)
})
visibility_config = optional(object({
cloudwatch_metrics_enabled = optional(bool)
metric_name = string
sampled_requests_enabled = optional(bool)
}), null)
}))
| `null` | no | +| [redacted\_fields](#input\_redacted\_fields) | The parts of the request that you want to keep out of the logs.
You can only specify one of the following: `method`, `query_string`, `single_header`, or `uri_path`

method:
Whether to enable redaction of the HTTP method.
The method indicates the type of operation that the request is asking the origin to perform.
uri\_path:
Whether to enable redaction of the URI path.
This is the part of a web request that identifies a resource.
query\_string:
Whether to enable redaction of the query string.
This is the part of a URL that appears after a `?` character, if any.
single\_header:
The list of names of the query headers to redact. |
map(object({
method = optional(bool, false)
uri_path = optional(bool, false)
query_string = optional(bool, false)
single_header = optional(list(string), null)
}))
| `{}` | no | +| [regex\_match\_statement\_rules](#input\_regex\_match\_statement\_rules) | A rule statement used to search web request components for a match against a single regular expression.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

captcha\_config:
Specifies how AWS WAF should handle CAPTCHA evaluations.

immunity\_time\_property:
Defines custom immunity time.

immunity\_time:
The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300.

rule\_label:
A list of labels to apply to web requests that match the rule match statement.

statement:
regex\_string:
String representing the regular expression. Minimum of 1 and maximum of 512 characters.
field\_to\_match:
The part of a web request that you want AWS WAF to inspect.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl.html#field_to_match
text\_transformation:
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. At least one required.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#text-transformation

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. |
list(object({
name = string
priority = number
action = string
captcha_config = optional(object({
immunity_time_property = object({
immunity_time = number
})
}), null)
rule_label = optional(list(string), null)
statement = any
visibility_config = optional(object({
cloudwatch_metrics_enabled = optional(bool)
metric_name = string
sampled_requests_enabled = optional(bool)
}), null)
}))
| `null` | no | +| [regex\_pattern\_set\_reference\_statement\_rules](#input\_regex\_pattern\_set\_reference\_statement\_rules) | A rule statement used to search web request components for matches with regular expressions.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

captcha\_config:
Specifies how AWS WAF should handle CAPTCHA evaluations.

immunity\_time\_property:
Defines custom immunity time.

immunity\_time:
The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300.

rule\_label:
A list of labels to apply to web requests that match the rule match statement.

statement:
arn:
The Amazon Resource Name (ARN) of the Regex Pattern Set that this statement references.
field\_to\_match:
The part of a web request that you want AWS WAF to inspect.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#field-to-match
text\_transformation:
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#text-transformation

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. |
list(object({
name = string
priority = number
action = string
captcha_config = optional(object({
immunity_time_property = object({
immunity_time = number
})
}), null)
rule_label = optional(list(string), null)
statement = any
visibility_config = optional(object({
cloudwatch_metrics_enabled = optional(bool)
metric_name = string
sampled_requests_enabled = optional(bool)
}), null)
}))
| `null` | no | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | -| [rule\_group\_reference\_statement\_rules](#input\_rule\_group\_reference\_statement\_rules) | A rule statement used to run the rules that are defined in an WAFv2 Rule Group.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

override\_action:
The override action to apply to the rules in a rule group.
Possible values: `count`, `none`

statement:
arn:
The ARN of the `aws_wafv2_rule_group` resource.
excluded\_rule:
The list of names of the rules to exclude.

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. | `list(any)` | `null` | no | +| [rule\_group\_reference\_statement\_rules](#input\_rule\_group\_reference\_statement\_rules) | A rule statement used to run the rules that are defined in a WAFv2 Rule Group.

name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

override\_action:
The override action to apply to the rules in a rule group.
Possible values: `count`, `none`

captcha\_config:
Specifies how AWS WAF should handle CAPTCHA evaluations.

immunity\_time\_property:
Defines custom immunity time.

immunity\_time:
The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300.

rule\_label:
A list of labels to apply to web requests that match the rule match statement.

statement:
arn:
The ARN of the `aws_wafv2_rule_group` resource.
rule\_action\_override:
Action settings to use in the place of the rule actions that are configured inside the rule group.
You specify one override for each rule whose action you want to change.

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. |
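For illustration only (not part of this change), the sketch below shows how a rule group reference with a per-rule action override might be configured from an Atmos stack, per the object type that follows. The rule group ARN and the overridden rule name (`example-rule-to-count`) are placeholders.

```yaml
# Hypothetical values throughout — nothing below is taken from this PR.
components:
  terraform:
    waf:
      vars:
        rule_group_reference_statement_rules:
          - name: "shared-rule-group" # placeholder rule name
            priority: 20
            override_action: "none"
            statement:
              # ARN of a pre-existing aws_wafv2_rule_group (placeholder)
              arn: "arn:aws:wafv2:us-east-1:111111111111:regional/rulegroup/example/00000000-0000-0000-0000-000000000000"
              # Map keys are the names of rules inside the referenced rule group
              rule_action_override:
                example-rule-to-count:
                  action: "count"
```

Note that `rule_action_override` replaces the `excluded_rule` attribute used by earlier versions of the upstream module; overriding a rule's action to `count` is the closest equivalent to excluding it.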
list(object({
name = string
priority = number
override_action = optional(string)
captcha_config = optional(object({
immunity_time_property = object({
immunity_time = number
})
}), null)
rule_label = optional(list(string), null)
statement = object({
arn = string
rule_action_override = optional(map(object({
action = string
custom_request_handling = optional(object({
insert_header = object({
name = string
value = string
})
}), null)
custom_response = optional(object({
response_code = string
response_header = optional(object({
name = string
value = string
}), null)
}), null)
})), null)
})
visibility_config = optional(object({
cloudwatch_metrics_enabled = optional(bool)
metric_name = string
sampled_requests_enabled = optional(bool)
}), null)
}))
| `null` | no | | [scope](#input\_scope) | Specifies whether this is for an AWS CloudFront distribution or for a regional application.
Possible values are `CLOUDFRONT` or `REGIONAL`.
To work with CloudFront, you must also specify the region us-east-1 (N. Virginia) on the AWS provider. | `string` | `"REGIONAL"` | no | -| [size\_constraint\_statement\_rules](#input\_size\_constraint\_statement\_rules) | A rule statement that uses a comparison operator to compare a number of bytes against the size of a request component.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

statement:
comparison\_operator:
The operator to use to compare the request part to the size setting.
Possible values: `EQ`, `NE`, `LE`, `LT`, `GE`, or `GT`.
size:
The size, in bytes, to compare to the request part, after any transformations.
Valid values are integers between `0` and `21474836480`, inclusive.
field\_to\_match:
The part of a web request that you want AWS WAF to inspect.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#field-to-match
text\_transformation:
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#text-transformation

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. | `list(any)` | `null` | no | -| [sqli\_match\_statement\_rules](#input\_sqli\_match\_statement\_rules) | An SQL injection match condition identifies the part of web requests,
such as the URI or the query string, that you want AWS WAF to inspect.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

statement:
field\_to\_match:
The part of a web request that you want AWS WAF to inspect.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#field-to-match
text\_transformation:
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#text-transformation

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. | `list(any)` | `null` | no | +| [size\_constraint\_statement\_rules](#input\_size\_constraint\_statement\_rules) | A rule statement that uses a comparison operator to compare a number of bytes against the size of a request component.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

captcha\_config:
Specifies how AWS WAF should handle CAPTCHA evaluations.

immunity\_time\_property:
Defines custom immunity time.

immunity\_time:
The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300.

rule\_label:
A list of labels to apply to web requests that match the rule match statement.

statement:
comparison\_operator:
The operator to use to compare the request part to the size setting.
Possible values: `EQ`, `NE`, `LE`, `LT`, `GE`, or `GT`.
size:
The size, in bytes, to compare to the request part, after any transformations.
Valid values are integers between `0` and `21474836480`, inclusive.
field\_to\_match:
The part of a web request that you want AWS WAF to inspect.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#field-to-match
text\_transformation:
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#text-transformation

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. |
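As a rough sketch (placeholders only, not part of this change), a size-constraint rule that blocks request bodies over a given size could be expressed as follows; the exact `statement` shape is defined by the upstream `cloudposse/waf/aws` module.

```yaml
# Hypothetical values throughout — nothing below is taken from this PR.
components:
  terraform:
    waf:
      vars:
        size_constraint_statement_rules:
          - name: "limit-request-body-size" # placeholder rule name
            priority: 30
            action: "block"
            statement:
              comparison_operator: "GT"
              size: 8192 # bytes, compared after any text transformations
              field_to_match:
                body: {}
              text_transformation:
                - priority: 1
                  type: "NONE"
```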
list(object({
name = string
priority = number
action = string
captcha_config = optional(object({
immunity_time_property = object({
immunity_time = number
})
}), null)
rule_label = optional(list(string), null)
statement = any
visibility_config = optional(object({
cloudwatch_metrics_enabled = optional(bool)
metric_name = string
sampled_requests_enabled = optional(bool)
}), null)
}))
| `null` | no | +| [sqli\_match\_statement\_rules](#input\_sqli\_match\_statement\_rules) | An SQL injection match condition identifies the part of web requests,
such as the URI or the query string, that you want AWS WAF to inspect.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

rule\_label:
A list of labels to apply to web requests that match the rule match statement.

captcha\_config:
Specifies how AWS WAF should handle CAPTCHA evaluations.

immunity\_time\_property:
Defines custom immunity time.

immunity\_time:
The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300.

statement:
field\_to\_match:
The part of a web request that you want AWS WAF to inspect.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#field-to-match
text\_transformation:
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#text-transformation

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. |
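As a rough sketch (placeholders only, not part of this change), an SQLi rule inspecting the query string with a couple of text transformations might be configured as shown below; again, the `statement` shape follows the upstream `cloudposse/waf/aws` module.

```yaml
# Hypothetical values throughout — nothing below is taken from this PR.
components:
  terraform:
    waf:
      vars:
        sqli_match_statement_rules:
          - name: "block-sqli-query-string" # placeholder rule name
            priority: 40
            action: "block"
            rule_label:
              - "ExampleSqliLabel" # placeholder label
            statement:
              field_to_match:
                query_string: {}
              text_transformation:
                - priority: 1
                  type: "URL_DECODE"
                - priority: 2
                  type: "HTML_ENTITY_DECODE"
```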
list(object({
name = string
priority = number
action = string
captcha_config = optional(object({
immunity_time_property = object({
immunity_time = number
})
}), null)
rule_label = optional(list(string), null)
statement = any
visibility_config = optional(object({
cloudwatch_metrics_enabled = optional(bool)
metric_name = string
sampled_requests_enabled = optional(bool)
}), null)
}))
| `null` | no | | [ssm\_path\_prefix](#input\_ssm\_path\_prefix) | SSM path prefix (with leading but not trailing slash) under which to store all WAF info | `string` | `"/waf"` | no | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no | -| [visibility\_config](#input\_visibility\_config) | Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. | `map(string)` | `{}` | no | -| [xss\_match\_statement\_rules](#input\_xss\_match\_statement\_rules) | A rule statement that defines a cross-site scripting (XSS) match search for AWS WAF to apply to web requests.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

xss\_match\_statement:
field\_to\_match:
The part of a web request that you want AWS WAF to inspect.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#field-to-match
text\_transformation:
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#text-transformation

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. | `list(any)` | `null` | no | +| [token\_domains](#input\_token\_domains) | Specifies the domains that AWS WAF should accept in a web request token.
This enables the use of tokens across multiple protected websites.
When AWS WAF provides a token, it uses the domain of the AWS resource that the web ACL is protecting.
If you don't specify a list of token domains, AWS WAF accepts tokens only for the domain of the protected resource.
With a token domain list, AWS WAF accepts the resource's host domain plus all domains in the token domain list,
including their prefixed subdomains. | `list(string)` | `null` | no | +| [visibility\_config](#input\_visibility\_config) | Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. |
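Since this input is now a required object (see the type that follows), every stack that uses this component must set it. A minimal sketch, with a placeholder metric name:

```yaml
components:
  terraform:
    waf:
      vars:
        visibility_config:
          cloudwatch_metrics_enabled: true
          metric_name: "example-web-acl" # placeholder
          sampled_requests_enabled: false
```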
object({
cloudwatch_metrics_enabled = bool
metric_name = string
sampled_requests_enabled = bool
})
| n/a | yes | +| [xss\_match\_statement\_rules](#input\_xss\_match\_statement\_rules) | A rule statement that defines a cross-site scripting (XSS) match search for AWS WAF to apply to web requests.

action:
The action that AWS WAF should take on a web request when it matches the rule's statement.
name:
A friendly name of the rule.
priority:
If you define more than one Rule in a WebACL,
AWS WAF evaluates each request against the rules in order based on the value of priority.
AWS WAF processes rules with lower priority first.

captcha\_config:
Specifies how AWS WAF should handle CAPTCHA evaluations.

immunity\_time\_property:
Defines custom immunity time.

immunity\_time:
The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300.

rule\_label:
A list of labels to apply to web requests that match the rule match statement.

statement:
field\_to\_match:
The part of a web request that you want AWS WAF to inspect.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#field-to-match
text\_transformation:
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#text-transformation

visibility\_config:
Defines and enables Amazon CloudWatch metrics and web request sample collection.

cloudwatch\_metrics\_enabled:
Whether the associated resource sends metrics to CloudWatch.
metric\_name:
A friendly name of the CloudWatch metric.
sampled\_requests\_enabled:
Whether AWS WAF should store a sampling of the web requests that match the rules. |
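As a final rough sketch (placeholders only, not part of this change), an XSS rule that only counts matches on the request body could look like this, with the `statement` shape again dictated by the upstream `cloudposse/waf/aws` module.

```yaml
# Hypothetical values throughout — nothing below is taken from this PR.
components:
  terraform:
    waf:
      vars:
        xss_match_statement_rules:
          - name: "count-xss-body" # placeholder rule name
            priority: 50
            action: "count"
            statement:
              field_to_match:
                body: {}
              text_transformation:
                - priority: 1
                  type: "HTML_ENTITY_DECODE"
```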
list(object({
name = string
priority = number
action = string
captcha_config = optional(object({
immunity_time_property = object({
immunity_time = number
})
}), null)
rule_label = optional(list(string), null)
statement = any
visibility_config = optional(object({
cloudwatch_metrics_enabled = optional(bool)
metric_name = string
sampled_requests_enabled = optional(bool)
}), null)
}))
| `null` | no | ## Outputs | Name | Description | |------|-------------| -| [waf](#output\_waf) | Information about the created WAF ACL | +| [arn](#output\_arn) | The ARN of the WAF WebACL. | +| [id](#output\_id) | The ID of the WAF WebACL. | +| [logging\_config\_id](#output\_logging\_config\_id) | The ARN of the WAFv2 Web ACL logging configuration. | - + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/waf) - Cloud Posse's upstream component +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/waf) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/waf/alb.tf b/modules/waf/alb.tf new file mode 100644 index 000000000..a978df085 --- /dev/null +++ b/modules/waf/alb.tf @@ -0,0 +1,15 @@ +locals { + alb_arns = concat(local.alb_name_arns, local.alb_tag_arns) + alb_name_arns = [for alb_instance in data.aws_alb.alb : alb_instance.arn] + alb_tag_arns = flatten([for alb_instance in data.aws_lbs.alb_by_tags : alb_instance.arns]) +} + +data "aws_alb" "alb" { + for_each = toset(var.alb_names) + name = each.key +} + +data "aws_lbs" "alb_by_tags" { + for_each = { for i, v in var.alb_tags : i => v } + tags = each.value +} diff --git a/modules/waf/default.auto.tfvars b/modules/waf/default.auto.tfvars deleted file mode 100644 index bccc95614..000000000 --- a/modules/waf/default.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -# This file is included by default in terraform plans - -enabled = false diff --git a/modules/waf/main.tf b/modules/waf/main.tf index 17ada3147..04ef1ae2a 100644 --- a/modules/waf/main.tf +++ b/modules/waf/main.tf @@ -1,34 +1,61 @@ locals { enabled = module.this.enabled + + association_resource_component_selectors_arns = [ + for i, v in var.association_resource_component_selectors : module.association_resource_components[i].outputs[v.component_arn_output] + if local.enabled + ] + + association_resource_arns = toset(concat(var.association_resource_arns, local.association_resource_component_selectors_arns, local.alb_arns)) + + log_destination_component_selectors = [ + for i, v in var.log_destination_component_selectors : module.log_destination_components[i].outputs[v.component_output] + if local.enabled + ] + + log_destination_configs = concat(var.log_destination_configs, local.log_destination_component_selectors) } module "aws_waf" { source = "cloudposse/waf/aws" - version = "0.0.4" + version = "1.8.0" - association_resource_arns = var.association_resource_arns + description = var.description + default_action = var.default_action + custom_response_body = var.custom_response_body + scope = var.scope + visibility_config = var.visibility_config + token_domains = var.token_domains + + # Association resources + association_resource_arns = local.association_resource_arns + + # Logging configuration + redacted_fields = var.redacted_fields + logging_filter = var.logging_filter + log_destination_configs = local.log_destination_configs + + # Rules byte_match_statement_rules = var.byte_match_statement_rules - default_action = var.default_action - description = var.description + geo_allowlist_statement_rules = var.geo_allowlist_statement_rules geo_match_statement_rules = var.geo_match_statement_rules ip_set_reference_statement_rules = var.ip_set_reference_statement_rules - log_destination_configs = var.log_destination_configs managed_rule_group_statement_rules = var.managed_rule_group_statement_rules rate_based_statement_rules = 
var.rate_based_statement_rules - redacted_fields = var.redacted_fields regex_pattern_set_reference_statement_rules = var.regex_pattern_set_reference_statement_rules + regex_match_statement_rules = var.regex_match_statement_rules rule_group_reference_statement_rules = var.rule_group_reference_statement_rules - scope = var.scope size_constraint_statement_rules = var.size_constraint_statement_rules sqli_match_statement_rules = var.sqli_match_statement_rules - visibility_config = var.visibility_config xss_match_statement_rules = var.xss_match_statement_rules + default_block_response = var.default_block_response context = module.this.context } resource "aws_ssm_parameter" "acl_arn" { - count = local.enabled ? 1 : 0 + count = local.enabled ? 1 : 0 + name = "${var.ssm_path_prefix}/${var.acl_name}/arn" value = module.aws_waf.arn description = "ARN for WAF web ACL ${var.acl_name}" diff --git a/modules/waf/outputs.tf b/modules/waf/outputs.tf index 6a422df7e..2897e7b9f 100644 --- a/modules/waf/outputs.tf +++ b/modules/waf/outputs.tf @@ -1,4 +1,14 @@ -output "waf" { - value = module.aws_waf - description = "Information about the created WAF ACL" +output "id" { + description = "The ID of the WAF WebACL." + value = module.aws_waf.id +} + +output "arn" { + description = "The ARN of the WAF WebACL." + value = module.aws_waf.arn +} + +output "logging_config_id" { + description = "The ARN of the WAFv2 Web ACL logging configuration." + value = module.aws_waf.logging_config_id } diff --git a/modules/waf/providers.tf b/modules/waf/providers.tf index 08ee01b2a..ef923e10a 100644 --- a/modules/waf/providers.tf +++ b/modules/waf/providers.tf @@ -1,12 +1,14 @@ provider "aws" { region = var.region - profile = module.iam_roles.profiles_enabled ? coalesce(var.import_profile_name, module.iam_roles.terraform_profile_name) : null + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name dynamic "assume_role" { - for_each = module.iam_roles.profiles_enabled ? [] : ["role"] + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) content { - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + role_arn = assume_role.value } } } @@ -15,15 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_profile_name" { - type = string - default = null - description = "AWS Profile name to use when importing a resource" -} - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/waf/remote-state.tf b/modules/waf/remote-state.tf new file mode 100755 index 000000000..db3a6c7e8 --- /dev/null +++ b/modules/waf/remote-state.tf @@ -0,0 +1,29 @@ +module "association_resource_components" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + count = local.enabled ? 
length(var.association_resource_component_selectors) : 0 + + component = var.association_resource_component_selectors[count.index].component + namespace = coalesce(lookup(var.association_resource_component_selectors[count.index], "namespace", null), module.this.namespace) + tenant = coalesce(lookup(var.association_resource_component_selectors[count.index], "tenant", null), module.this.tenant) + environment = coalesce(lookup(var.association_resource_component_selectors[count.index], "environment", null), module.this.environment) + stage = coalesce(lookup(var.association_resource_component_selectors[count.index], "stage", null), module.this.stage) + + context = module.this.context +} + +module "log_destination_components" { + source = "cloudposse/stack-config/yaml//modules/remote-state" + version = "1.5.0" + + count = local.enabled ? length(var.log_destination_component_selectors) : 0 + + component = var.log_destination_component_selectors[count.index].component + namespace = coalesce(lookup(var.log_destination_component_selectors[count.index], "namespace", null), module.this.namespace) + tenant = coalesce(lookup(var.log_destination_component_selectors[count.index], "tenant", null), module.this.tenant) + environment = coalesce(lookup(var.log_destination_component_selectors[count.index], "environment", null), module.this.environment) + stage = coalesce(lookup(var.log_destination_component_selectors[count.index], "stage", null), module.this.stage) + + context = module.this.context +} diff --git a/modules/waf/variables.tf b/modules/waf/variables.tf index 11f415515..ac04944f9 100644 --- a/modules/waf/variables.tf +++ b/modules/waf/variables.tf @@ -14,20 +14,50 @@ variable "acl_name" { description = "Friendly name of the ACL. The ACL ARN will be stored in SSM under {ssm_path_prefix}/{acl_name}/arn" } +variable "description" { + type = string + default = "Managed by Terraform" + description = "A friendly description of the WebACL." +} + variable "default_action" { type = string default = "block" description = "Specifies that AWS WAF should allow requests by default. Possible values: `allow`, `block`." + nullable = false validation { condition = contains(["allow", "block"], var.default_action) error_message = "Allowed values: `allow`, `block`." } } -variable "description" { +variable "default_block_response" { type = string - default = "Managed by Terraform" - description = "A friendly description of the WebACL." + default = null + description = <<-DOC + A HTTP response code that is sent when default action is used. Only takes effect if default_action is set to `block`. + DOC + nullable = true +} + +variable "custom_response_body" { + type = map(object({ + content = string + content_type = string + })) + + description = <<-DOC + Defines custom response bodies that can be referenced by custom_response actions. + The map keys are used as the `key` attribute which is a unique key identifying the custom response body. + content: + Payload of the custom response. + The response body can be plain text, HTML or JSON and cannot exceed 4KB in size. + content_type: + Content Type of Response Body. + Valid values are `TEXT_PLAIN`, `TEXT_HTML`, or `APPLICATION_JSON`. + DOC + default = {} + nullable = false } variable "scope" { @@ -38,6 +68,7 @@ variable "scope" { Possible values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region us-east-1 (N. Virginia) on the AWS provider. 
DOC + nullable = false validation { condition = contains(["CLOUDFRONT", "REGIONAL"], var.scope) error_message = "Allowed values: `CLOUDFRONT`, `REGIONAL`." @@ -45,8 +76,11 @@ variable "scope" { } variable "visibility_config" { - type = map(string) - default = {} + type = object({ + cloudwatch_metrics_enabled = bool + metric_name = string + sampled_requests_enabled = bool + }) description = <<-DOC Defines and enables Amazon CloudWatch metrics and web request sample collection. @@ -57,10 +91,154 @@ variable "visibility_config" { sampled_requests_enabled: Whether AWS WAF should store a sampling of the web requests that match the rules. DOC + nullable = false +} + +variable "token_domains" { + type = list(string) + description = <<-DOC + Specifies the domains that AWS WAF should accept in a web request token. + This enables the use of tokens across multiple protected websites. + When AWS WAF provides a token, it uses the domain of the AWS resource that the web ACL is protecting. + If you don't specify a list of token domains, AWS WAF accepts tokens only for the domain of the protected resource. + With a token domain list, AWS WAF accepts the resource's host domain plus all domains in the token domain list, + including their prefixed subdomains. + DOC + default = null +} + +# Logging configuration +# https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl_logging_configuration.html +variable "log_destination_configs" { + type = list(string) + default = [] + description = "The Amazon Kinesis Data Firehose, CloudWatch Log log group, or S3 bucket Amazon Resource Names (ARNs) that you want to associate with the web ACL" +} + +variable "redacted_fields" { + type = map(object({ + method = optional(bool, false) + uri_path = optional(bool, false) + query_string = optional(bool, false) + single_header = optional(list(string), null) + })) + default = {} + description = <<-DOC + The parts of the request that you want to keep out of the logs. + You can only specify one of the following: `method`, `query_string`, `single_header`, or `uri_path` + + method: + Whether to enable redaction of the HTTP method. + The method indicates the type of operation that the request is asking the origin to perform. + uri_path: + Whether to enable redaction of the URI path. + This is the part of a web request that identifies a resource. + query_string: + Whether to enable redaction of the query string. + This is the part of a URL that appears after a `?` character, if any. + single_header: + The list of names of the query headers to redact. + DOC + nullable = false +} + +variable "logging_filter" { + type = object({ + default_behavior = string + filter = list(object({ + behavior = string + requirement = string + condition = list(object({ + action_condition = optional(object({ + action = string + }), null) + label_name_condition = optional(object({ + label_name = string + }), null) + })) + })) + }) + default = null + description = <<-DOC + A configuration block that specifies which web requests are kept in the logs and which are dropped. + You can filter on the rule action and on the web request labels that were applied by matching rules during web ACL evaluation. + DOC +} + +# Association resources +variable "association_resource_arns" { + type = list(string) + default = [] + description = <<-DOC + A list of ARNs of the resources to associate with the web ACL. + This must be an ARN of an Application Load Balancer, Amazon API Gateway stage, or AWS AppSync. 
+ + Do not use this variable to associate a Cloudfront Distribution. + Instead, you should use the `web_acl_id` property on the `cloudfront_distribution` resource. + For more details, refer to https://docs.aws.amazon.com/waf/latest/APIReference/API_AssociateWebACL.html + DOC + nullable = false +} + +variable "alb_names" { + description = "list of ALB names to associate with the web ACL." + type = list(string) + default = [] + nullable = false } +variable "alb_tags" { + description = "list of tags to match one or more ALBs to associate with the web ACL." + type = list(map(string)) + default = [] + nullable = false +} + +variable "association_resource_component_selectors" { + type = list(object({ + component = string + namespace = optional(string, null) + tenant = optional(string, null) + environment = optional(string, null) + stage = optional(string, null) + component_arn_output = string + })) + default = [] + description = <<-DOC + A list of Atmos component selectors to get from the remote state and associate their ARNs with the web ACL. + The components must be Application Load Balancers, Amazon API Gateway stages, or AWS AppSync. + + component: + Atmos component name + component_arn_output: + The component output that defines the component ARN + + Do not use this variable to select a Cloudfront Distribution component. + Instead, you should use the `web_acl_id` property on the `cloudfront_distribution` resource. + For more details, refer to https://docs.aws.amazon.com/waf/latest/APIReference/API_AssociateWebACL.html + DOC + nullable = false +} + +# Rules variable "byte_match_statement_rules" { - type = list(any) + type = list(object({ + name = string + priority = number + action = string + captcha_config = optional(object({ + immunity_time_property = object({ + immunity_time = number + }) + }), null) + rule_label = optional(list(string), null) + statement = any + visibility_config = optional(object({ + cloudwatch_metrics_enabled = optional(bool) + metric_name = string + sampled_requests_enabled = optional(bool) + }), null) + })) default = null description = <<-DOC A rule statement that defines a string match search for AWS WAF to apply to web requests. @@ -74,7 +252,23 @@ variable "byte_match_statement_rules" { AWS WAF evaluates each request against the rules in order based on the value of priority. AWS WAF processes rules with lower priority first. + captcha_config: + Specifies how AWS WAF should handle CAPTCHA evaluations. + + immunity_time_property: + Defines custom immunity time. + + immunity_time: + The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300. + + rule_label: + A List of labels to apply to web requests that match the rule match statement + statement: + positional_constraint: + Area within the portion of a web request that you want AWS WAF to search for search_string. Valid values include the following: EXACTLY, STARTS_WITH, ENDS_WITH, CONTAINS, CONTAINS_WORD. + search_string + String value that you want AWS WAF to search for. AWS WAF searches only in the part of web requests that you designate for inspection in field_to_match. field_to_match: The part of a web request that you want AWS WAF to inspect. 
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#field-to-match @@ -94,8 +288,87 @@ variable "byte_match_statement_rules" { DOC } +variable "geo_allowlist_statement_rules" { + type = list(object({ + name = string + priority = number + action = string + captcha_config = optional(object({ + immunity_time_property = object({ + immunity_time = number + }) + }), null) + rule_label = optional(list(string), null) + statement = any + visibility_config = optional(object({ + cloudwatch_metrics_enabled = optional(bool) + metric_name = string + sampled_requests_enabled = optional(bool) + }), null) + })) + default = null + description = <<-DOC + A rule statement used to identify a list of allowed countries which should not be blocked by the WAF. + + name: + A friendly name of the rule. + priority: + If you define more than one Rule in a WebACL, + AWS WAF evaluates each request against the rules in order based on the value of priority. + AWS WAF processes rules with lower priority first. + + captcha_config: + Specifies how AWS WAF should handle CAPTCHA evaluations. + + immunity_time_property: + Defines custom immunity time. + + immunity_time: + The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300. + + rule_label: + A List of labels to apply to web requests that match the rule match statement + + statement: + country_codes: + A list of two-character country codes. + forwarded_ip_config: + fallback_behavior: + The match status to assign to the web request if the request doesn't have a valid IP address in the specified position. + Possible values: `MATCH`, `NO_MATCH` + header_name: + The name of the HTTP header to use for the IP address. + + visibility_config: + Defines and enables Amazon CloudWatch metrics and web request sample collection. + + cloudwatch_metrics_enabled: + Whether the associated resource sends metrics to CloudWatch. + metric_name: + A friendly name of the CloudWatch metric. + sampled_requests_enabled: + Whether AWS WAF should store a sampling of the web requests that match the rules. + DOC +} + variable "geo_match_statement_rules" { - type = list(any) + type = list(object({ + name = string + priority = number + action = string + captcha_config = optional(object({ + immunity_time_property = object({ + immunity_time = number + }) + }), null) + rule_label = optional(list(string), null) + statement = any + visibility_config = optional(object({ + cloudwatch_metrics_enabled = optional(bool) + metric_name = string + sampled_requests_enabled = optional(bool) + }), null) + })) default = null description = <<-DOC A rule statement used to identify web requests based on country of origin. @@ -109,6 +382,18 @@ variable "geo_match_statement_rules" { AWS WAF evaluates each request against the rules in order based on the value of priority. AWS WAF processes rules with lower priority first. + captcha_config: + Specifies how AWS WAF should handle CAPTCHA evaluations. + + immunity_time_property: + Defines custom immunity time. + + immunity_time: + The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300. + + rule_label: + A List of labels to apply to web requests that match the rule match statement + statement: country_codes: A list of two-character country codes. 
@@ -132,7 +417,23 @@ variable "geo_match_statement_rules" { } variable "ip_set_reference_statement_rules" { - type = list(any) + type = list(object({ + name = string + priority = number + action = string + captcha_config = optional(object({ + immunity_time_property = object({ + immunity_time = number + }) + }), null) + rule_label = optional(list(string), null) + statement = any + visibility_config = optional(object({ + cloudwatch_metrics_enabled = optional(bool) + metric_name = string + sampled_requests_enabled = optional(bool) + }), null) + })) default = null description = <<-DOC A rule statement used to detect web requests coming from particular IP addresses or address ranges. @@ -146,9 +447,31 @@ variable "ip_set_reference_statement_rules" { AWS WAF evaluates each request against the rules in order based on the value of priority. AWS WAF processes rules with lower priority first. + captcha_config: + Specifies how AWS WAF should handle CAPTCHA evaluations. + + immunity_time_property: + Defines custom immunity time. + + immunity_time: + The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300. + + rule_label: + A List of labels to apply to web requests that match the rule match statement + statement: arn: The ARN of the IP Set that this statement references. + ip_set: + Defines a new IP Set + + description: + A friendly description of the IP Set + addresses: + Contains an array of strings that specifies zero or more IP addresses or blocks of IP addresses. + All addresses must be specified using Classless Inter-Domain Routing (CIDR) notation. + ip_address_version: + Specify `IPV4` or `IPV6` ip_set_forwarded_ip_config: fallback_behavior: The match status to assign to the web request if the request doesn't have a valid IP address in the specified position. 
@@ -172,7 +495,83 @@ variable "ip_set_reference_statement_rules" { } variable "managed_rule_group_statement_rules" { - type = list(any) + type = list(object({ + name = string + priority = number + override_action = optional(string) + captcha_config = optional(object({ + immunity_time_property = object({ + immunity_time = number + }) + }), null) + rule_label = optional(list(string), null) + statement = object({ + name = string + vendor_name = string + version = optional(string) + rule_action_override = optional(map(object({ + action = string + custom_request_handling = optional(object({ + insert_header = object({ + name = string + value = string + }) + }), null) + custom_response = optional(object({ + response_code = string + response_header = optional(object({ + name = string + value = string + }), null) + }), null) + })), null) + managed_rule_group_configs = optional(list(object({ + aws_managed_rules_bot_control_rule_set = optional(object({ + inspection_level = string + enable_machine_learning = optional(bool, true) + }), null) + aws_managed_rules_atp_rule_set = optional(object({ + enable_regex_in_path = optional(bool) + login_path = string + request_inspection = optional(object({ + payload_type = string + password_field = object({ + identifier = string + }) + username_field = object({ + identifier = string + }) + }), null) + response_inspection = optional(object({ + body_contains = optional(object({ + success_strings = list(string) + failure_strings = list(string) + }), null) + header = optional(object({ + name = string + success_values = list(string) + failure_values = list(string) + }), null) + json = optional(object({ + + identifier = string + success_strings = list(string) + failure_strings = list(string) + }), null) + status_code = optional(object({ + success_codes = list(string) + failure_codes = list(string) + }), null) + }), null) + }), null) + })), null) + }) + visibility_config = optional(object({ + cloudwatch_metrics_enabled = optional(bool) + metric_name = string + sampled_requests_enabled = optional(bool) + }), null) + })) default = null description = <<-DOC A rule statement used to run the rules that are defined in a managed rule group. @@ -188,13 +587,32 @@ variable "managed_rule_group_statement_rules" { The override action to apply to the rules in a rule group. Possible values: `count`, `none` + captcha_config: + Specifies how AWS WAF should handle CAPTCHA evaluations. + + immunity_time_property: + Defines custom immunity time. + + immunity_time: + The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300. + + rule_label: + A List of labels to apply to web requests that match the rule match statement + statement: name: The name of the managed rule group. vendor_name: The name of the managed rule group vendor. - excluded_rule: - The list of names of the rules to exclude. + version: + The version of the managed rule group. + You can set `Version_1.0` or `Version_1.1` etc. If you want to use the default version, do not set anything. + rule_action_override: + Action settings to use in the place of the rule actions that are configured inside the rule group. + You specify one override for each rule whose action you want to change. + managed_rule_group_configs: + Additional information that's used by a managed rule group. Only one rule attribute is allowed in each config. + Refer to https://docs.aws.amazon.com/waf/latest/developerguide/aws-managed-rule-groups-list.html for more details. 
visibility_config: Defines and enables Amazon CloudWatch metrics and web request sample collection. @@ -209,7 +627,50 @@ variable "managed_rule_group_statement_rules" { } variable "rate_based_statement_rules" { - type = list(any) + type = list(object({ + name = string + priority = number + action = string + captcha_config = optional(object({ + immunity_time_property = object({ + immunity_time = number + }) + }), null) + rule_label = optional(list(string), null) + statement = object({ + limit = number + aggregate_key_type = string + evaluation_window_sec = optional(number) + forwarded_ip_config = optional(object({ + fallback_behavior = string + header_name = string + }), null) + scope_down_statement = optional(object({ + byte_match_statement = object({ + positional_constraint = string + search_string = string + field_to_match = object({ + all_query_arguments = optional(bool) + body = optional(bool) + method = optional(bool) + query_string = optional(bool) + single_header = optional(object({ name = string })) + single_query_argument = optional(object({ name = string })) + uri_path = optional(bool) + }) + text_transformation = list(object({ + priority = number + type = string + })) + }) + }), null) + }) + visibility_config = optional(object({ + cloudwatch_metrics_enabled = optional(bool) + metric_name = string + sampled_requests_enabled = optional(bool) + }), null) + })) default = null description = <<-DOC A rate-based rule tracks the rate of requests for each originating IP address, @@ -224,18 +685,46 @@ variable "rate_based_statement_rules" { AWS WAF evaluates each request against the rules in order based on the value of priority. AWS WAF processes rules with lower priority first. + captcha_config: + Specifies how AWS WAF should handle CAPTCHA evaluations. + + immunity_time_property: + Defines custom immunity time. + + immunity_time: + The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300. + + rule_label: + A List of labels to apply to web requests that match the rule match statement + statement: aggregate_key_type: Setting that indicates how to aggregate the request counts. Possible values include: `FORWARDED_IP` or `IP` limit: The limit on requests per 5-minute period for a single originating IP address. + evaluation_window_sec: + The amount of time, in seconds, that AWS WAF should include in its request counts, looking back from the current time. + Valid values are 60, 120, 300, and 600. Defaults to 300 (5 minutes). forwarded_ip_config: fallback_behavior: The match status to assign to the web request if the request doesn't have a valid IP address in the specified position. Possible values: `MATCH`, `NO_MATCH` header_name: The name of the HTTP header to use for the IP address. + byte_match_statement: + field_to_match: + Part of a web request that you want AWS WAF to inspect. + positional_constraint: + Area within the portion of a web request that you want AWS WAF to search for search_string. + Valid values include the following: `EXACTLY`, `STARTS_WITH`, `ENDS_WITH`, `CONTAINS`, `CONTAINS_WORD`. + search_string: + String value that you want AWS WAF to search for. + AWS WAF searches only in the part of web requests that you designate for inspection in `field_to_match`. + The maximum length of the value is 50 bytes. + text_transformation: + Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. 
+ See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#text-transformation visibility_config: Defines and enables Amazon CloudWatch metrics and web request sample collection. @@ -250,7 +739,23 @@ variable "rate_based_statement_rules" { } variable "regex_pattern_set_reference_statement_rules" { - type = list(any) + type = list(object({ + name = string + priority = number + action = string + captcha_config = optional(object({ + immunity_time_property = object({ + immunity_time = number + }) + }), null) + rule_label = optional(list(string), null) + statement = any + visibility_config = optional(object({ + cloudwatch_metrics_enabled = optional(bool) + metric_name = string + sampled_requests_enabled = optional(bool) + }), null) + })) default = null description = <<-DOC A rule statement used to search web request components for matches with regular expressions. @@ -264,6 +769,18 @@ variable "regex_pattern_set_reference_statement_rules" { AWS WAF evaluates each request against the rules in order based on the value of priority. AWS WAF processes rules with lower priority first. + captcha_config: + Specifies how AWS WAF should handle CAPTCHA evaluations. + + immunity_time_property: + Defines custom immunity time. + + immunity_time: + The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300. + + rule_label: + A List of labels to apply to web requests that match the rule match statement + statement: arn: The Amazon Resource Name (ARN) of the Regex Pattern Set that this statement references. @@ -286,11 +803,27 @@ variable "regex_pattern_set_reference_statement_rules" { DOC } -variable "rule_group_reference_statement_rules" { - type = list(any) +variable "regex_match_statement_rules" { + type = list(object({ + name = string + priority = number + action = string + captcha_config = optional(object({ + immunity_time_property = object({ + immunity_time = number + }) + }), null) + rule_label = optional(list(string), null) + statement = any + visibility_config = optional(object({ + cloudwatch_metrics_enabled = optional(bool) + metric_name = string + sampled_requests_enabled = optional(bool) + }), null) + })) default = null description = <<-DOC - A rule statement used to run the rules that are defined in an WAFv2 Rule Group. + A rule statement used to search web request components for a match against a single regular expression. action: The action that AWS WAF should take on a web request when it matches the rule's statement. @@ -301,15 +834,109 @@ variable "rule_group_reference_statement_rules" { AWS WAF evaluates each request against the rules in order based on the value of priority. AWS WAF processes rules with lower priority first. + captcha_config: + Specifies how AWS WAF should handle CAPTCHA evaluations. + + immunity_time_property: + Defines custom immunity time. + + immunity_time: + The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300. + + rule_label: + A List of labels to apply to web requests that match the rule match statement + + statement: + regex_string: + String representing the regular expression. Minimum of 1 and maximum of 512 characters. + field_to_match: + The part of a web request that you want AWS WAF to inspect. 
+ See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl.html#field_to_match + text_transformation: + Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. At least one required. + See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#text-transformation + + visibility_config: + Defines and enables Amazon CloudWatch metrics and web request sample collection. + + cloudwatch_metrics_enabled: + Whether the associated resource sends metrics to CloudWatch. + metric_name: + A friendly name of the CloudWatch metric. + sampled_requests_enabled: + Whether AWS WAF should store a sampling of the web requests that match the rules. + DOC +} + +variable "rule_group_reference_statement_rules" { + type = list(object({ + name = string + priority = number + override_action = optional(string) + captcha_config = optional(object({ + immunity_time_property = object({ + immunity_time = number + }) + }), null) + rule_label = optional(list(string), null) + statement = object({ + arn = string + rule_action_override = optional(map(object({ + action = string + custom_request_handling = optional(object({ + insert_header = object({ + name = string + value = string + }) + }), null) + custom_response = optional(object({ + response_code = string + response_header = optional(object({ + name = string + value = string + }), null) + }), null) + })), null) + }) + visibility_config = optional(object({ + cloudwatch_metrics_enabled = optional(bool) + metric_name = string + sampled_requests_enabled = optional(bool) + }), null) + })) + default = null + description = <<-DOC + A rule statement used to run the rules that are defined in an WAFv2 Rule Group. + + name: + A friendly name of the rule. + priority: + If you define more than one Rule in a WebACL, + AWS WAF evaluates each request against the rules in order based on the value of priority. + AWS WAF processes rules with lower priority first. + override_action: The override action to apply to the rules in a rule group. Possible values: `count`, `none` + captcha_config: + Specifies how AWS WAF should handle CAPTCHA evaluations. + + immunity_time_property: + Defines custom immunity time. + + immunity_time: + The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300. + + rule_label: + A List of labels to apply to web requests that match the rule match statement + statement: arn: The ARN of the `aws_wafv2_rule_group` resource. - excluded_rule: - The list of names of the rules to exclude. + rule_action_override: + Action settings to use in the place of the rule actions that are configured inside the rule group. + You specify one override for each rule whose action you want to change. visibility_config: Defines and enables Amazon CloudWatch metrics and web request sample collection. 
@@ -324,7 +951,23 @@ variable "rule_group_reference_statement_rules" { } variable "size_constraint_statement_rules" { - type = list(any) + type = list(object({ + name = string + priority = number + action = string + captcha_config = optional(object({ + immunity_time_property = object({ + immunity_time = number + }) + }), null) + rule_label = optional(list(string), null) + statement = any + visibility_config = optional(object({ + cloudwatch_metrics_enabled = optional(bool) + metric_name = string + sampled_requests_enabled = optional(bool) + }), null) + })) default = null description = <<-DOC A rule statement that uses a comparison operator to compare a number of bytes against the size of a request component. @@ -338,6 +981,18 @@ variable "size_constraint_statement_rules" { AWS WAF evaluates each request against the rules in order based on the value of priority. AWS WAF processes rules with lower priority first. + captcha_config: + Specifies how AWS WAF should handle CAPTCHA evaluations. + + immunity_time_property: + Defines custom immunity time. + + immunity_time: + The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300. + + rule_label: + A List of labels to apply to web requests that match the rule match statement + statement: comparison_operator: The operator to use to compare the request part to the size setting. @@ -365,7 +1020,23 @@ variable "size_constraint_statement_rules" { } variable "sqli_match_statement_rules" { - type = list(any) + type = list(object({ + name = string + priority = number + action = string + captcha_config = optional(object({ + immunity_time_property = object({ + immunity_time = number + }) + }), null) + rule_label = optional(list(string), null) + statement = any + visibility_config = optional(object({ + cloudwatch_metrics_enabled = optional(bool) + metric_name = string + sampled_requests_enabled = optional(bool) + }), null) + })) default = null description = <<-DOC An SQL injection match condition identifies the part of web requests, @@ -380,6 +1051,18 @@ variable "sqli_match_statement_rules" { AWS WAF evaluates each request against the rules in order based on the value of priority. AWS WAF processes rules with lower priority first. + rule_label: + A List of labels to apply to web requests that match the rule match statement + + captcha_config: + Specifies how AWS WAF should handle CAPTCHA evaluations. + + immunity_time_property: + Defines custom immunity time. + + immunity_time: + The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300. + statement: field_to_match: The part of a web request that you want AWS WAF to inspect. @@ -401,7 +1084,23 @@ variable "sqli_match_statement_rules" { } variable "xss_match_statement_rules" { - type = list(any) + type = list(object({ + name = string + priority = number + action = string + captcha_config = optional(object({ + immunity_time_property = object({ + immunity_time = number + }) + }), null) + rule_label = optional(list(string), null) + statement = any + visibility_config = optional(object({ + cloudwatch_metrics_enabled = optional(bool) + metric_name = string + sampled_requests_enabled = optional(bool) + }), null) + })) default = null description = <<-DOC A rule statement that defines a cross-site scripting (XSS) match search for AWS WAF to apply to web requests. 
@@ -415,7 +1114,19 @@ variable "xss_match_statement_rules" { AWS WAF evaluates each request against the rules in order based on the value of priority. AWS WAF processes rules with lower priority first. - xss_match_statement: + captcha_config: + Specifies how AWS WAF should handle CAPTCHA evaluations. + + immunity_time_property: + Defines custom immunity time. + + immunity_time: + The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by AWS WAF. The default setting is 300. + + rule_label: + A List of labels to apply to web requests that match the rule match statement + + statement: field_to_match: The part of a web request that you want AWS WAF to inspect. See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl#field-to-match @@ -435,36 +1146,134 @@ variable "xss_match_statement_rules" { DOC } -variable "association_resource_arns" { +# Logging configuration +# https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl_logging_configuration.html +variable "log_destination_configs" { type = list(string) default = [] description = <<-DOC - A list of ARNs of the resources to associate with the web ACL. - This must be an ARN of an Application Load Balancer or an Amazon API Gateway stage. + A list of resource names/ARNs to associate Amazon Kinesis Data Firehose, Cloudwatch Log log group, or S3 bucket with the WAF logs. + Note: data firehose, log group, or bucket name must be prefixed with `aws-waf-logs-`, + e.g. `aws-waf-logs-example-firehose`, `aws-waf-logs-example-log-group`, or `aws-waf-logs-example-bucket`. DOC } -variable "log_destination_configs" { - type = list(string) - default = [] - description = "The Amazon Kinesis Data Firehose ARNs." -} - variable "redacted_fields" { - type = map(any) + type = map(object({ + method = optional(bool, false) + uri_path = optional(bool, false) + query_string = optional(bool, false) + single_header = optional(list(string), null) + })) default = {} description = <<-DOC The parts of the request that you want to keep out of the logs. - method_enabled: + You can only specify one of the following: `method`, `query_string`, `single_header`, or `uri_path` + + method: Whether to enable redaction of the HTTP method. The method indicates the type of operation that the request is asking the origin to perform. - uri_path_enabled: + uri_path: Whether to enable redaction of the URI path. This is the part of a web request that identifies a resource. - query_string_enabled: + query_string: Whether to enable redaction of the query string. This is the part of a URL that appears after a `?` character, if any. single_header: The list of names of the query headers to redact. DOC + nullable = false +} + +variable "logging_filter" { + type = object({ + default_behavior = string + filter = list(object({ + behavior = string + requirement = string + condition = list(object({ + action_condition = optional(object({ + action = string + }), null) + label_name_condition = optional(object({ + label_name = string + }), null) + })) + })) + }) + default = null + description = <<-DOC + A configuration block that specifies which web requests are kept in the logs and which are dropped. + You can filter on the rule action and on the web request labels that were applied by matching rules during web ACL evaluation. 
+ DOC +} + +variable "log_destination_component_selectors" { + type = list(object({ + component = string + namespace = optional(string, null) + tenant = optional(string, null) + environment = optional(string, null) + stage = optional(string, null) + component_output = string + })) + default = [] + description = <<-DOC + A list of Atmos component selectors to get from the remote state and associate their names/ARNs with the WAF logs. + The components must be Amazon Kinesis Data Firehose, CloudWatch Log Group, or S3 bucket. + + component: + Atmos component name + component_output: + The component output that defines the component name or ARN + + Set `tenant`, `environment` and `stage` if the components are in different OUs, regions or accounts. + + Note: data firehose, log group, or bucket name must be prefixed with `aws-waf-logs-`, + e.g. `aws-waf-logs-example-firehose`, `aws-waf-logs-example-log-group`, or `aws-waf-logs-example-bucket`. + DOC + nullable = false +} + +# Association resources +variable "association_resource_arns" { + type = list(string) + default = [] + description = <<-DOC + A list of ARNs of the resources to associate with the web ACL. + This must be an ARN of an Application Load Balancer, Amazon API Gateway stage, or AWS AppSync. + + Do not use this variable to associate a Cloudfront Distribution. + Instead, you should use the `web_acl_id` property on the `cloudfront_distribution` resource. + For more details, refer to https://docs.aws.amazon.com/waf/latest/APIReference/API_AssociateWebACL.html + DOC + nullable = false +} + +variable "association_resource_component_selectors" { + type = list(object({ + component = string + namespace = optional(string, null) + tenant = optional(string, null) + environment = optional(string, null) + stage = optional(string, null) + component_arn_output = string + })) + default = [] + description = <<-DOC + A list of Atmos component selectors to get from the remote state and associate their ARNs with the web ACL. + The components must be Application Load Balancers, Amazon API Gateway stages, or AWS AppSync. + + component: + Atmos component name + component_arn_output: + The component output that defines the component ARN + + Set `tenant`, `environment` and `stage` if the components are in different OUs, regions or accounts. + + Do not use this variable to select a Cloudfront Distribution component. + Instead, you should use the `web_acl_id` property on the `cloudfront_distribution` resource. + For more details, refer to https://docs.aws.amazon.com/waf/latest/APIReference/API_AssociateWebACL.html + DOC + nullable = false } diff --git a/modules/waf/versions.tf b/modules/waf/versions.tf index e89eb16ed..f0f3f0553 100644 --- a/modules/waf/versions.tf +++ b/modules/waf/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 1.0.0" + required_version = ">= 1.3.0" required_providers { aws = { source = "hashicorp/aws" - version = "~> 4.0" + version = ">= 5.0" } } } diff --git a/modules/zscaler/README.md b/modules/zscaler/README.md index f0df84185..787124b9e 100644 --- a/modules/zscaler/README.md +++ b/modules/zscaler/README.md @@ -1,8 +1,17 @@ +--- +tags: + - component/zscaler + - layer/unassigned + - provider/aws +--- + # Component: `zscaler` This component is responsible for provisioning ZScaler Private Access Connector instances on Amazon Linux 2 AMIs. 
-Prior to provisioning this component, it is required that a SecureString SSM Parameter containing the ZScaler App Connector Provisioning Key is populated in each account corresponding to the regional stack the component is deployed to, with the name of the SSM Parameter matching the value of `var.zscaler_key`. +Prior to provisioning this component, it is required that a SecureString SSM Parameter containing the ZScaler App +Connector Provisioning Key is populated in each account corresponding to the regional stack the component is deployed +to, with the name of the SSM Parameter matching the value of `var.zscaler_key`. This parameter should be populated using `chamber`, which is included in the geodesic image: @@ -10,7 +19,8 @@ This parameter should be populated using `chamber`, which is included in the geo chamber write zscaler key ``` -Where is the ZScaler App Connector Provisioning Key. For more information on how to generate this key, see: [ZScaler documentation on Configuring App Connectors](https://help.zscaler.com/zpa/configuring-connectors). +Where `` is the ZScaler App Connector Provisioning Key. For more information on how to generate this key, see: +[ZScaler documentation on Configuring App Connectors](https://help.zscaler.com/zpa/configuring-connectors). ## Usage @@ -26,7 +36,8 @@ components: zscaler_count: 2 ``` -Preferably, regional stack configurations can be kept _DRY_ by importing `catalog/zscaler` via the `imports` list at the top of the configuration. +Preferably, regional stack configurations can be kept _DRY_ by importing `catalog/zscaler` via the `imports` list at the +top of the configuration. ``` import: @@ -34,6 +45,7 @@ import: - catalog/zscaler ``` + ## Requirements @@ -44,7 +56,7 @@ import: | [null](#requirement\_null) | >= 3.0 | | [random](#requirement\_random) | >= 3.0 | | [template](#requirement\_template) | >= 2.2 | -| [utils](#requirement\_utils) | >= 0.4.3 | +| [utils](#requirement\_utils) | >= 1.10.0 | ## Providers @@ -68,7 +80,7 @@ import: | [aws_iam_role_policy_attachment.ssm_core](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_ami.amazon_linux_2](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_ssm_parameter.zscaler_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | -| [template_file.userdata](https://registry.terraform.io/providers/hashicorp/template/latest/docs/data-sources/file) | data source | +| [template_file.userdata](https://registry.terraform.io/providers/cloudposse/template/latest/docs/data-sources/file) | data source | ## Inputs @@ -84,8 +96,7 @@ import: | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no | | [environment](#input\_environment) | Environment, e.g. 'uw2', 'us-west-2', OR 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` for default, which is `0`.
Does not affect `id_full`. | `number` | `null` | no | -| [import\_role\_arn](#input\_import\_role\_arn) | IAM Role ARN to use when importing a resource | `string` | `null` | no | -| [instance\_type](#input\_instance\_type) | The instance family to use for the ZScaler EC2 instances. | `string` | `"r5n.medium"` | no | +| [instance\_type](#input\_instance\_type) | The instance family to use for the ZScaler EC2 instances. | `string` | `"m5n.large"` | no | | [label\_key\_case](#input\_label\_key\_case) | The letter case of label keys (`tag` names) (i.e. `name`, `namespace`, `environment`, `stage`, `attributes`) to use in `tags`.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no | | [label\_order](#input\_label\_order) | The naming order of the id output and Name tag.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 5 elements, but at least one must be present. | `list(string)` | `null` | no | | [label\_value\_case](#input\_label\_value\_case) | The letter case of output label values (also used in `tags` and `id`).
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Default value: `lower`. | `string` | `null` | no | @@ -94,7 +105,7 @@ import: | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Regex to replace chars with empty string in `namespace`, `environment`, `stage` and `name`.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS region | `string` | n/a | yes | | [secrets\_store\_type](#input\_secrets\_store\_type) | Secret store type for Zscaler provisioning keys. Valid values: `SSM`, `ASM` (but `ASM` not currently supported) | `string` | `"SSM"` | no | -| [security\_group\_rules](#input\_security\_group\_rules) | A list of maps of Security Group rules.
The values of map is fully complated with `aws_security_group_rule` resource.
To get more info see [security\_group\_rule](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule). | `list(any)` |
[
{
"cidr_blocks": [
"0.0.0.0/0"
],
"from_port": 0,
"protocol": "-1",
"to_port": 65535,
"type": "egress"
}
]
| no | +| [security\_group\_rules](#input\_security\_group\_rules) | A list of maps of Security Group rules.
The values of the map are fully compatible with the `aws_security_group_rule` resource.
To get more info see [security\_group\_rule](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule). | `list(any)` |
[
{
"cidr_blocks": [
"0.0.0.0/0"
],
"from_port": 0,
"protocol": "-1",
"to_port": 65535,
"type": "egress"
}
]
| no | | [stage](#input\_stage) | Stage, e.g. 'prod', 'staging', 'dev', OR 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | | [tags](#input\_tags) | Additional tags (e.g. `map('BusinessUnit','XYZ')` | `map(string)` | `{}` | no | | [zscaler\_count](#input\_zscaler\_count) | The number of Zscaler instances. | `number` | `1` | no | @@ -107,8 +118,11 @@ import: | [instance\_id](#output\_instance\_id) | Instance ID | | [private\_ip](#output\_private\_ip) | Private IP of the instance | + ## References -* [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/zscaler) - Cloud Posse's upstream component + +- [cloudposse/terraform-aws-components](https://github.com/cloudposse/terraform-aws-components/tree/main/modules/zscaler) - + Cloud Posse's upstream component [](https://cpco.io/component) diff --git a/modules/zscaler/default.auto.tfvars b/modules/zscaler/default.auto.tfvars deleted file mode 100644 index 063bfdca6..000000000 --- a/modules/zscaler/default.auto.tfvars +++ /dev/null @@ -1,6 +0,0 @@ -enabled = false - -name = "zscaler" - -# Cheapest instance that satisfies DenyInstancesWithoutEncryptionInTransit SCP (see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/data-protection.html#encryption-transit) -instance_type = "m5n.large" diff --git a/modules/zscaler/providers.tf b/modules/zscaler/providers.tf index fd46aae55..ef923e10a 100644 --- a/modules/zscaler/providers.tf +++ b/modules/zscaler/providers.tf @@ -1,10 +1,15 @@ provider "aws" { region = var.region - assume_role { - # `terraform import` will not use data from a data source, - # so on import we have to explicitly specify the role - role_arn = coalesce(var.import_role_arn, module.iam_roles.terraform_role_arn) + # Profile is deprecated in favor of terraform_role_arn. When profiles are not in use, terraform_profile_name is null. + profile = module.iam_roles.terraform_profile_name + + dynamic "assume_role" { + # module.iam_roles.terraform_role_arn may be null, in which case do not assume a role. + for_each = compact([module.iam_roles.terraform_role_arn]) + content { + role_arn = assume_role.value + } } } @@ -12,9 +17,3 @@ module "iam_roles" { source = "../account-map/modules/iam-roles" context = module.this.context } - -variable "import_role_arn" { - type = string - default = null - description = "IAM Role ARN to use when importing a resource" -} diff --git a/modules/zscaler/variables.tf b/modules/zscaler/variables.tf index d3492dd92..2dd827758 100644 --- a/modules/zscaler/variables.tf +++ b/modules/zscaler/variables.tf @@ -22,8 +22,11 @@ variable "aws_ssm_enabled" { } variable "instance_type" { - type = string - default = "r5n.medium" + type = string + # We default to m5n.large because it is cheapest instance that satisfies + # DenyInstancesWithoutEncryptionInTransit SCP + # (see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/data-protection.html#encryption-transit ) + default = "m5n.large" description = "The instance family to use for the ZScaler EC2 instances." } variable "secrets_store_type" { @@ -62,7 +65,7 @@ variable "security_group_rules" { ] description = <<-EOT A list of maps of Security Group rules. - The values of map is fully complated with `aws_security_group_rule` resource. + The values of map is fully completed with `aws_security_group_rule` resource. To get more info see [security_group_rule](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule). 
EOT } diff --git a/modules/zscaler/versions.tf b/modules/zscaler/versions.tf index 007db2651..a35488bbe 100644 --- a/modules/zscaler/versions.tf +++ b/modules/zscaler/versions.tf @@ -7,7 +7,7 @@ terraform { version = ">= 3.0" } template = { - source = "hashicorp/template" + source = "cloudposse/template" version = ">= 2.2" } null = { @@ -20,7 +20,7 @@ terraform { } utils = { source = "cloudposse/utils" - version = ">= 0.4.3" + version = ">= 1.10.0" } } } diff --git a/rootfs/usr/local/bin/aws-config b/rootfs/usr/local/bin/aws-config index dbf2579c2..beb7e304b 100755 --- a/rootfs/usr/local/bin/aws-config +++ b/rootfs/usr/local/bin/aws-config @@ -1,15 +1,47 @@ #!/bin/bash -## WORK IN PROGRESS +## Production ready, but still being developed and subject to frequent breaking changes. -## Usage: -## aws-config saml > rootfs/etc/aws-config/aws-config-saml +functions+=(help) +function help() { + fns=($(printf '%s\n' "${functions[@]}" | sort | uniq)) + # usage=${fns//$'\n'/ | } + printf "Usage: %s \n Where is one of:\n\n" "$(basename $0)" + printf ' %s\n' "${fns[@]}" + echo + + cat <<'EOF' + +## Examples: +## aws-config teams > rootfs/etc/aws-config/aws-config-teams ## Generates full `aws` CLI configuration for use in Geodesic -## based on SAML authentication and SAML roles. +## to access aws-teams and aws-team-roles. +## +## aws-config switch-roles > rootfs/etc/aws-config/aws-extend-switch-roles +## aws-config switch-roles billing > rootfs/etc/aws-config/aws-extend-switch-roles-billing +## aws-config switch-roles billing_admin > rootfs/etc/aws-config/aws-extend-switch-roles-billing_admin +## Generates configuration for AWS Extend Switch Roles browser plugin +## https://github.com/tilfinltd/aws-extend-switch-roles +## +## aws-config spacelift > rootfs/etc/aws-config/aws-config-spacelift +## Generates `aws` CLI/SDK configuration for Spacelift workers to use ## -## aws-config saml admin > rootfs/etc/aws-config/aws-switch-roles -## Generates configuration for AWS Switch Role browser plugin, -## except it omits the source profile. TODO: generate source profile + +EOF + +} + +# main needs to be defined before sourcing other files + +function main() { + if printf '%s\0' "${functions[@]}" | grep -Fxqz -- "$1"; then + "$@" + else + help + exit 99 + fi +} + ## TODO: maybe pull the source files from S3 rather than file system account_sources=("$ATMOS_BASE_PATH/"components/terraform/account-map/account-info/*sh) @@ -17,6 +49,13 @@ iam_sources=("$ATMOS_BASE_PATH/"components/terraform/aws-team-roles/iam-role-inf namespaces=($(for script in "${account_sources[@]}"; do $script namespace; done)) +declare -A source_profiles +for script in "${account_sources[@]}"; do + namespace=$($script namespace) + source_profiles[$namespace]=$($script source-profile) + [[ -n "${source_profiles[$namespace]}" ]] || source_profiles[$namespace]="${namespace}-identity" +done +unset namespace unset _auto_generated_warning function _auto-generated-warning() { @@ -45,7 +84,6 @@ function _extra-profiles() { # Usage: _saml [ ...] function _saml() { local namespace - local source_profile local selected_roles local region="${AWS_REGION:-${AWS_DEFAULT_REGION}}" @@ -57,7 +95,7 @@ function _saml() { [[ -n $selected_roles ]] && ! 
[[ $selected_roles =~ " $role " ]] && continue printf "[profile %s]\n" "$($source profile $role)" [[ -n ${region} ]] && printf "region = %s\n" "$region" - printf "source_profile = %s-identity\n" "$namespace" + printf "source_profile = %s\n" "${source_profiles[$namespace]}" printf "role_arn = %s\n\n" $($source role-arn $role) done done @@ -78,20 +116,31 @@ function saml() { _saml "$@" } +# Generate AWS config file for assuming `aws-teams` and `aws-team-roles` roles. +# Will generate a profile for every role in every account, unless a role is specified, +# in which case it will only generate a profile for that role in every account. +# Usage: teams [] +functions+=(teams) +function teams() { + saml "$@" +} + functions+=(switch-roles) function switch-roles() { local region="${AWS_REGION:-${AWS_DEFAULT_REGION}}" + printf ";; This configuration file is for the AWS Extend Switch Roles browser plugin.\n\n" + _auto-generated-warning for namespace in "${namespaces[@]}"; do - printf "[profile %s-identity]\n" "$namespace" + printf "[profile %s]\n" "${source_profiles[$namespace]}" [[ -n ${region} ]] && printf "region = %s\n" "$region" printf "aws_account_id = %s\n\n" $($0 -n $namespace account-profile $($0 -n $namespace account-for-role identity)) done echo _no_source_profile=skip - saml admin + saml "${@:-admin}" } functions+=(spacelift) @@ -105,7 +154,7 @@ function spacelift() { # TODO: lookup Spacelift target Role ARN rather than guess/hard code it. profile_base="$($0 -n $namespace account-profile $($0 -n $namespace account-for-role identity))" account_id="$($0 -n $namespace account-id $($0 -n $namespace account-for-role identity))" - printf "[profile %s-identity]\n" "$namespace" + printf "[profile %s]\n" "${source_profiles[$namespace]}" [[ -n ${region} ]] && printf "region = %s\n" "$region" printf "role_arn = arn:aws:iam::%s:role/%s-spacelift\n" "$account_id" "$profile_base" printf "credential_source = Ec2InstanceMetadata\n" @@ -114,7 +163,12 @@ function spacelift() { echo _no_source_profile=skip - saml admin terraform + saml admin terraform planner +} + +functions+=(accounts) +function accounts() { + account-ids } case $1 in @@ -123,32 +177,32 @@ case $1 in shift ;; -n*) - if [[ $x == ${x#*=} ]]; then + if [[ $1 == ${1#*=} ]]; then # -n namespace target_namespace=($2) shift 2 else # -n=namespace - target_namespace=(${x#*=}) + target_namespace=(${1#*=}) shift fi ;; - account*) + *) if [[ -n $NAMESPACE ]]; then target_namespace=($NAMESPACE) - elif [[ ${#namespaces[@]} == 1 ]]; then - target_namespace=("${namespaces[0]}") else - echo "ERROR: NAMESPACE not set. Please set NAMESPACE or use -n to specify the namespace to use." - exit 1 + target_namespace=("${namespaces[@]}") fi ;; - *) +esac + +case $1 in + # These commands automatically use all namespaces, so we don't need to loop over them. + spacelift|switch-roles|teams|saml) target_namespace=("${namespaces[0]}") ;; esac -# Use main() from account-info script args=("$@") for namespace in "${target_namespace[@]}"; do source "$ATMOS_BASE_PATH/components/terraform/account-map/account-info/$namespace"*.sh @@ -156,6 +210,8 @@ for namespace in "${target_namespace[@]}"; do export CONFIG_NAMESPACE=$namespace fi main "${args[@]}" - [[ $? == 99 ]] && exit 0 + exit_code=$? + [[ $exit_code == 99 ]] && exit 0 done +exit $exit_code
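As a usage illustration for the new logging and association selector variables added to the WAF module earlier in this change set, a hypothetical `.tfvars` sketch follows. The component names and output names are assumptions made for the example, not values taken from this repository:

```hcl
# Associate WAF logs with a Firehose delivery stream looked up from another
# Atmos component's remote state. The delivery stream (or log group / bucket)
# name must be prefixed with `aws-waf-logs-`.
log_destination_component_selectors = [
  {
    component        = "kinesis-firehose"    # assumed Atmos component name
    component_output = "delivery_stream_arn" # assumed output holding the name/ARN
  }
]

# Associate the web ACL with an ALB provisioned by another component,
# optionally in a different stage/account.
association_resource_component_selectors = [
  {
    component            = "alb"     # assumed Atmos component name
    stage                = "prod"
    component_arn_output = "alb_arn" # assumed output holding the load balancer ARN
  }
]
```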