Logic to patch RDS proxy #1545

name: CICD
on:
workflow_dispatch:
inputs:
run-reset-deployments:
description: "Reset deployment: Clean start"
required: false
default: false
type: boolean
run-tests:
description: "Run tests step"
required: false
default: false
type: boolean
push:
branches:
- "development"
tags:
- "v*"
pull_request:
branches:
- "development"
# pull_request_review:
# types: [submitted]
concurrency:
group: cicd-${{ github.ref }}
cancel-in-progress: true
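# One CICD run per ref: a new push to the same branch or PR cancels the in-flight run.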
jobs:
build:
name: Build images and push to GHCR
permissions:
id-token: write # This is required for requesting the JWT
packages: write # To push to GHCR.io
runs-on: ubuntu-latest
concurrency:
group: cicd-build-${{ matrix.image }}
cancel-in-progress: false
outputs:
image_version: ${{ steps.meta.outputs.version }}
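# Note: a matrix job exposes a single set of outputs, taken from whichever leg writes
# last. That is safe here because steps.meta.outputs.version is the same for every image.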
strategy:
matrix:
image:
[
ledger-nodes,
ledger-browser,
tails-server,
governance-ga-agent,
governance-trust-registry,
governance-multitenant-web,
governance-webhooks-web,
governance-multitenant-agent,
governance-endorser,
pytest,
]
include:
- image: governance-ga-agent
context: .
file: dockerfiles/agents/Dockerfile.agent
- image: governance-trust-registry
context: .
file: dockerfiles/trustregistry/Dockerfile
- image: governance-multitenant-web
context: .
file: dockerfiles/fastapi/Dockerfile
- image: governance-webhooks-web
context: .
file: dockerfiles/webhooks/Dockerfile
- image: governance-multitenant-agent
context: .
file: dockerfiles/agents/Dockerfile.author.agent
- image: ledger-browser
context: https://github.com/bcgov/von-network.git#main
file: Dockerfile
- image: ledger-nodes
context: https://github.com/bcgov/von-network.git#main
file: Dockerfile
- image: governance-endorser
context: .
file: dockerfiles/endorser/Dockerfile
- image: tails-server
context: https://github.com/bcgov/indy-tails-server.git#v1.1.0
file: docker/Dockerfile.tails-server
- image: pytest
context: .
file: dockerfiles/tests/Dockerfile
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Docker Metadata
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/${{ github.repository_owner }}/${{ matrix.image }}
tags: |
type=sha,prefix=pr-${{ github.event.pull_request.number }}-,priority=601,enable=${{ github.event_name == 'pull_request' }}
type=sha,prefix={{branch}}-,priority=601,enable=${{ github.event_name == 'push' && github.ref_type == 'branch' }}
type=ref,event=branch,priority=600
type=ref,event=pr
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
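# docker/metadata-action uses the highest-priority tag as steps.meta.outputs.version,
# so the sha-based tags (priority 601) win over the plain branch/PR tags (priority 600).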
- name: Build and push Docker images
uses: docker/build-push-action@v5
with:
context: ${{ matrix.context }}
file: ${{ matrix.file }}
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha,scope=build-${{ matrix.image }}
cache-to: type=gha,mode=max,scope=build-${{ matrix.image }}
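# Scoping the GitHub Actions cache per image keeps the matrix legs from clobbering
# one another's layer caches.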
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python 3.9
uses: actions/setup-python@v4
with:
python-version: 3.9
cache: pip
- name: Install test dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Run Pylint
run: |
pylint app --rcfile=.pylintrc -r n --msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}" --exit-zero > pylintreport.txt
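# --exit-zero makes lint advisory: the job passes regardless of findings, and
# pylintreport.txt only exists on the runner (it is not uploaded as an artifact).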
test:
name: Test
needs:
- build
runs-on: ubuntu-latest
outputs:
test_success: ${{ steps.test.outputs.test_success }}
strategy:
matrix:
module:
- { test: "app/tests/e2e/test_definitions.py", cov: "--cov=app", id: "1" }
- { test: "app/tests/e2e/issuer/did_sov/", cov: "--cov=app", id: "2" }
- { test: "app/tests/e2e/issuer/did_key_*", cov: "--cov=app", id: "3" }
- { test: "app/tests/e2e/test_verifier.py", cov: "--cov=app", id: "4" }
- { test: "app/tests/e2e/test_trust_registry_integration.py", cov: "--cov=app", id: "5" }
- { test: "app --ignore=app/tests/e2e/test_definitions.py
--ignore=app/tests/e2e/issuer/did_sov/
--ignore=app/tests/e2e/issuer/did_key_*
--ignore=app/tests/e2e/test_verifier.py
--ignore=app/tests/e2e/test_trust_registry_integration.py", cov: "--cov=app", id: "6" }
- { test: "endorser trustregistry webhooks", cov: "--cov=endorser --cov=trustregistry --cov=webhooks", id: "7" }
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python 3.9
uses: actions/setup-python@v4
with:
python-version: 3.9
cache: pip
- name: Install test dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Start Test Harness
run: ./manage up-as-daemon-no-build
shell: bash
env:
REGISTRY: ghcr.io/${{ github.repository_owner }}
IMAGE_TAG: ${{ needs.build.outputs.image_version }}
- name: Test with pytest
id: test
run: |
set +e
pytest --durations=0 ${{ matrix.module.cov }} ${{ matrix.module.test }} | tee test_output.txt
EXIT_CODE=${PIPESTATUS[0]}
set -e
echo "Exit code: $EXIT_CODE"
mkdir -p coverage-files
mv .coverage coverage-files/.coverage.${{ matrix.module.id }}
# hacky workaround: a teardown error makes pytest exit non-zero even when every test passed
TEARDOWN_ERROR=false
SINGLE_ERROR=false
TEST_FAILURES=0
if grep -q "ERROR at teardown" test_output.txt; then
echo "ERROR at teardown"
TEARDOWN_ERROR=true
fi
if grep -q ", 1 error in" test_output.txt; then
echo "Only 1 error total"
SINGLE_ERROR=true
fi
# Count the number of test failures
TEST_FAILURES=$(grep -c "^FAILED" test_output.txt || true)
echo "Number of test failures: $TEST_FAILURES"
if [ "$TEARDOWN_ERROR" = true ] && [ "$SINGLE_ERROR" = true ] && [ "$TEST_FAILURES" -eq 0 ]; then
echo "Tests passed with teardown error"
exit 0
else
if [ "$EXIT_CODE" -ne 0 ]; then
echo "test_success=false" >> $GITHUB_OUTPUT
else
echo "test_success=true" >> $GITHUB_OUTPUT
fi
exit $EXIT_CODE
fi
- name: Upload .coverage files as artifact
uses: actions/upload-artifact@v3
with:
name: coverage-files-${{ matrix.module.id }}
path: coverage-files/.coverage.${{ matrix.module.id }}
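# The per-module suffix (.coverage.<id>) is what allows `coverage combine` to merge
# these files in the combine-coverage job below.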
status-check:
name: Status Check
runs-on: ubuntu-latest
needs: test
if: always()
steps:
- name: Check if any test failed
run: exit 1
if: needs.test.outputs.test_success == 'false'
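# Caveat: test_success is one shared output, set by whichever matrix leg writes last,
# so a 'false' can be overwritten; a hard failure still fails the test job itself.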
combine-coverage:
name: Coverage
runs-on: ubuntu-latest
needs: status-check
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python 3.9
uses: actions/setup-python@v4
with:
python-version: 3.9
- name: Install dependencies
run: pip install coverage
- name: Download all .coverage artifacts
uses: actions/download-artifact@v3
with:
path: coverage-files
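# download-artifact v3 puts each artifact into its own subdirectory under `path`,
# hence the move step below.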
- name: Move coverage files to top-level directory
run: |
for dir in coverage-files/coverage-files-*; do
mv "$dir"/.coverage.* .
done
- name: Combine coverage files
run: |
coverage combine
coverage report
- name: Generate XML coverage report
run: coverage xml
- name: Upload coverage to Codacy
run: bash <(curl -Ls https://coverage.codacy.com/get.sh) report -r coverage.xml
env:
CODACY_PROJECT_TOKEN: ${{ secrets.CODACY_PROJECT_TOKEN }}
deploy:
name: Deploy to EKS
environment:
name: dev
needs:
- build
permissions:
id-token: write # Required to authenticate with AWS
contents: read # Required to clone this repository
checks: write # Required for action-junit-report
pull-requests: write # Required to comment on PRs for Pytest coverage comment
runs-on: ubuntu-latest
if: github.actor != 'dependabot[bot]'
env:
TAILSCALE_VERSION: 1.50.1
HELMFILE_VERSION: v0.157.0
HELM_VERSION: v3.13.1
OUTPUT_FILE: test_output.xml
COVERAGE_FILE: test_coverage.txt
concurrency:
group: cicd-deploy
cancel-in-progress: false
outputs:
output: ${{ steps.updated_deployments.outputs.success }}
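# Note: the updated_deployments step at the end of this job is commented out, so this
# output is currently always empty.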
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Checkout Charts
uses: actions/checkout@v4
with:
repository: didx-xyz/charts
token: ${{ secrets.PAT }}
path: charts
ref: single-rep-tails
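# A PAT is needed to check out the (presumably private) didx-xyz/charts repo; `ref`
# pins the chart branch this workflow deploys from.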
- name: Install dependencies
run: sudo apt-get install -y postgresql-client redis-tools
- name: Sops Binary Installer
uses: mdgreenwald/mozilla-sops-action@vX.Y.Z # release tag was mangled to "[email protected]" in the source; restore the pinned version
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-region: af-south-1
role-to-assume: arn:aws:iam::402177810328:role/cicd
role-session-name: github-cicd
- name: Update Kubeconfig
run: aws eks update-kubeconfig --name cloudapi-dev
- uses: tailscale/github-action@main
with:
authkey: ${{ secrets.TAILSCALE_AUTHKEY }}
version: ${{ env.TAILSCALE_VERSION }}
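# Tailscale joins the runner to the tailnet so kubectl, psql and redis-cli can reach
# the in-VPC endpoints (EKS, Aurora, Elasticache) used in the steps below.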
- name: Helmfile Diff
if: github.event.inputs.run-reset-deployments == 'true'
uses: helmfile/helmfile-action@vX.Y.Z # release tag was mangled to "[email protected]" in the source; restore the pinned version (applies to every helmfile step below)
with:
helmfile-args: |
diff \
--environment ${{ vars.ENVIRONMENT }} \
-f ./charts/helmfiles/aries-cloudapi-python.yaml \
--set image.tag=${{ env.IMAGE_TAG }} \
--set image.registry=ghcr.io/${{ github.repository_owner }}
helm-plugins: |
https://github.com/databus23/helm-diff,
https://github.com/jkroepke/helm-secrets
helmfile-version: ${{ env.HELMFILE_VERSION }}
helm-version: ${{ env.HELM_VERSION }}
env:
IMAGE_TAG: ${{ needs.build.outputs.image_version }}
- name: Helmfile Destroy
id: destroy_deployments
if: github.event.inputs.run-reset-deployments == 'true'
uses: helmfile/helmfile-action@vX.Y.Z
with:
helmfile-args: |
destroy \
--environment ${{ vars.ENVIRONMENT }} \
-f ./charts/helmfiles/aries-cloudapi-python.yaml
helm-plugins: |
https://github.com/databus23/helm-diff,
https://github.com/jkroepke/helm-secrets
helmfile-version: ${{ env.HELMFILE_VERSION }}
helm-version: ${{ env.HELM_VERSION }}
env:
IMAGE_TAG: ${{ needs.build.outputs.image_version }}
- name: Wait for pods to terminate
if: github.event.inputs.run-reset-deployments == 'true'
run: |
while true; do
remaining_pods=$(kubectl -n dev-cloudapi get pods --field-selector=status.phase!=Running,status.phase!=Succeeded -o jsonpath='{.items[*].metadata.name}')
if [ -z "$remaining_pods" ]; then
echo "All pods terminated"
break
else
echo "Waiting for pods to terminate"
sleep 10
fi
done
- name: Drop DBs
id: drop_dbs
if: github.event.inputs.run-reset-deployments == 'true'
env:
DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
DB_HOST: ${{ secrets.DB_HOST }}
DB_USER: ${{ secrets.DB_USER }}
DB_PORT: ${{ secrets.DB_PORT }}
DB_EXCLUDE: ${{ secrets.DB_EXCLUDE }}
GA_ACAPY_WALLET_NAME: ${{ secrets.GA_ACAPY_WALLET_NAME }}
MT_ACAPY_WALLET_NAME: ${{ secrets.MT_ACAPY_WALLET_NAME }}
TRUST_REGISTRY_DB_OWNER: ${{ secrets.TRUST_REGISTRY_DB_OWNER }}
run: |
bash ./scripts/aurora-delete.sh -o $GA_ACAPY_WALLET_NAME -d
bash ./scripts/aurora-delete.sh -o $MT_ACAPY_WALLET_NAME -d
bash ./scripts/aurora-delete.sh -o $TRUST_REGISTRY_DB_OWNER -d
bash ./scripts/aurora-delete.sh -o $TRUST_REGISTRY_DB_OWNER -c
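# Reading from the calls above: -o selects databases by owner, -d appears to drop them
# and -c appears to recreate; see scripts/aurora-delete.sh for the actual semantics.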
- name: List Elasticache Redis
if: github.event.inputs.run-reset-deployments == 'true'
env:
REDIS_HOST: ${{ secrets.REDIS_HOST }}
REDIS_PORT: ${{ secrets.REDIS_PORT }}
run: redis-cli -h $REDIS_HOST -p $REDIS_PORT --scan --pattern '*'
- name: Clean Elasticache Redis
if: github.event.inputs.run-reset-deployments == 'true'
env:
REDIS_HOST: ${{ secrets.REDIS_HOST }}
REDIS_PORT: ${{ secrets.REDIS_PORT }}
run: redis-cli -h $REDIS_HOST -p $REDIS_PORT FLUSHALL
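# FLUSHALL deletes every key on the Redis endpoint, not just this app's data; it only
# runs behind the run-reset-deployments guard.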
- name: Helmfile Apply # Apply default helmfile (without RDS proxy) when resetting deployments.
if: github.event.inputs.run-reset-deployments == 'true'
uses: helmfile/helmfile-action@vX.Y.Z
with:
helmfile-args: |
apply \
--environment ${{ vars.ENVIRONMENT }} \
-f ./charts/helmfiles/aries-cloudapi-python.yaml \
--set image.tag=${{ env.IMAGE_TAG }} \
--set image.registry=ghcr.io/${{ github.repository_owner }}
helm-plugins: |
https://github.com/databus23/helm-diff,
https://github.com/jkroepke/helm-secrets
helmfile-version: ${{ env.HELMFILE_VERSION }}
helm-version: ${{ env.HELM_VERSION }}
env:
IMAGE_TAG: ${{ needs.build.outputs.image_version }}
- name: Helmfile Apply (Exclude Agents) # Always exclude agents and apply (zero diff if resetting deployments)
uses: helmfile/helmfile-action@vX.Y.Z
with:
helmfile-args: |
apply \
--environment ${{ vars.ENVIRONMENT }} \
-f ./charts/helmfiles/aries-cloudapi-python.yaml \
--set image.tag=${{ env.IMAGE_TAG }} \
--set image.registry=ghcr.io/${{ github.repository_owner }} \
--selector app!=governance-ga-agent,app!=governance-multitenant-agent
helm-plugins: |
https://github.com/databus23/helm-diff,
https://github.com/jkroepke/helm-secrets
helmfile-version: ${{ env.HELMFILE_VERSION }}
helm-version: ${{ env.HELM_VERSION }}
env:
IMAGE_TAG: ${{ needs.build.outputs.image_version }}
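# Two-phase rollout: this apply covers everything except the two agent releases
# (app!=... selectors); the agents are applied next with the RDS proxy overrides.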
- name: Create values file
run: |
echo 'env:' > ./charts/helmfiles/acapy-wallet-storage-config.yaml
echo ' ACAPY_WALLET_STORAGE_CONFIG: |-' >> ./charts/helmfiles/acapy-wallet-storage-config.yaml
echo ' ${{ secrets.ACAPY_WALLET_STORAGE_CONFIG }}' >> ./charts/helmfiles/acapy-wallet-storage-config.yaml
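# debug: confirm the values file landed where the patch step below expects it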
pwd
ls -la
- name: Helmfile Patch Proxy # Always patch agents with proxy.
id: patch_proxy
uses: helmfile/helmfile-action@vX.Y.Z
with:
helmfile-args: |
apply \
--environment ${{ vars.ENVIRONMENT }} \
-f ./charts/helmfiles/aries-cloudapi-python.yaml \
--set image.tag=${{ env.IMAGE_TAG }} \
--set image.registry=ghcr.io/${{ github.repository_owner }} \
--selector app=governance-ga-agent \
--selector app=governance-multitenant-agent \
--set env.WALLET_DB_HOST=${{ secrets.DB_PROXY_HOST }} \
--values acapy-wallet-storage-config.yaml
helm-plugins: |
https://github.com/databus23/helm-diff,
https://github.com/jkroepke/helm-secrets
helmfile-version: ${{ env.HELMFILE_VERSION }}
helm-version: ${{ env.HELM_VERSION }}
env:
IMAGE_TAG: ${{ needs.build.outputs.image_version }}
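# This is the patch this PR adds: the agent releases are re-applied with WALLET_DB_HOST
# pointed at the RDS proxy endpoint and the wallet storage config injected from the
# values file created above. helmfile appears to resolve the relative --values path
# against the helmfile's own directory, which would be why the file is written into
# charts/helmfiles rather than the workspace root.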
- name: Helmfile apply pytest
# if: github.event.review.state == 'approved' || github.event_name == 'push' || ( github.event.inputs.run-reset-deployments == 'true' && github.event.inputs.run-tests == 'true' ) || github.event.inputs.run-tests == 'true'
id: pytest
uses: helmfile/helmfile-action@vX.Y.Z
with:
helmfile-args: |
apply \
--environment ${{ vars.ENVIRONMENT }} \
-f ./charts/helmfiles/aries-capi-test.yaml \
--set image.tag=${{ env.IMAGE_TAG }} \
--set image.registry=ghcr.io/${{ github.repository_owner }}
helm-plugins: |
https://github.com/databus23/helm-diff,
https://github.com/jkroepke/helm-secrets
helmfile-version: ${{ env.HELMFILE_VERSION }}
helm-version: ${{ env.HELM_VERSION }}
env:
IMAGE_TAG: ${{ needs.build.outputs.image_version }}
- name: Wait for pytest and print logs
# if: github.event.review.state == 'approved' || github.event_name == 'push' || ( github.event.inputs.run-reset-deployments == 'true' && github.event.inputs.run-tests == 'true' ) || github.event.inputs.run-tests == 'true'
run: |
while true; do
# Check if the job is complete or failed
COMPLETION_STATUS=$(kubectl get job $JOB_NAME -n $NAMESPACE -o jsonpath='{.status.succeeded}')
FAILURE_STATUS=$(kubectl get job $JOB_NAME -n $NAMESPACE -o jsonpath='{.status.failed}')
if [ "$COMPLETION_STATUS" == "1" ] || [ "$FAILURE_STATUS" == "1" ]; then
echo "Job $JOB_NAME has completed."
break
else
echo "Waiting for job to complete..."
sleep 10
fi
done
# Get all pods for the job
pods=$(kubectl get pods -n $NAMESPACE --selector=job-name=$JOB_NAME -o jsonpath='{.items[*].metadata.name}')
# Loop through the pods and get logs
for pod in $pods
do
echo "Logs for Pod: $pod"
kubectl logs -n $NAMESPACE $pod
done
env:
JOB_NAME: cloudapi-pytest
NAMESPACE: dev-cloudapi
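# A minimal alternative sketch (same JOB_NAME/NAMESPACE env assumed):
#   kubectl -n $NAMESPACE wait --for=condition=complete job/$JOB_NAME --timeout=10m
# would block without polling, but a single `kubectl wait` cannot watch Complete and
# Failed at once, which is why the loop above checks both status fields.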
- name: Copy k8s pytest results
# if: github.event.review.state == 'approved' || github.event_name == 'push' || ( github.event.inputs.run-reset-deployments == 'true' && github.event.inputs.run-tests == 'true' ) || github.event.inputs.run-tests == 'true'
run: |
echo "apiVersion: v1
kind: Pod
metadata:
name: $POD_NAME
namespace: $NAMESPACE
spec:
containers:
- name: $POD_NAME
image: $CONTAINER_IMAGE
command: [\"sleep\", \"3600\"]
volumeMounts:
- name: my-volume
mountPath: $MOUNT_PATH
volumes:
- name: my-volume
persistentVolumeClaim:
claimName: $PVC_NAME
restartPolicy: Never" > busybox-pod.yaml
kubectl apply -f busybox-pod.yaml
# Wait for the pod to be ready
echo "Waiting for pod to be ready..."
kubectl -n $NAMESPACE wait --for=condition=ready pod/$POD_NAME --timeout=60s
# Copy the file from the pod to your local system
echo "Copying file from pod..."
kubectl -n $NAMESPACE cp $POD_NAME:$MOUNT_PATH/$OUTPUT_FILE $LOCAL_PATH/$OUTPUT_FILE
kubectl -n $NAMESPACE cp $POD_NAME:$MOUNT_PATH/$COVERAGE_FILE $LOCAL_PATH/$COVERAGE_FILE
# Clean up: delete the temporary pod
echo "Cleaning up..."
kubectl -n $NAMESPACE delete pod $POD_NAME
echo "Done!"
env:
PVC_NAME: cloudapi-pytest
POD_NAME: pytest-results
CONTAINER_IMAGE: busybox
MOUNT_PATH: /mnt
LOCAL_PATH: ./
NAMESPACE: dev-cloudapi
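# The throwaway busybox pod exists only to mount the cloudapi-pytest PVC so the JUnit
# XML and coverage file written by the in-cluster test job can be copied out via kubectl cp.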
- name: Publish Test Report
uses: mikepenz/action-junit-report@v4
if: success() || failure() # run whether previous steps passed or failed (skipped only on cancellation)
with:
report_paths: './test_output.xml'
fail_on_failure: true
detailed_summary: true
require_passed_tests: true
- name: Pytest coverage comment
if: success() || failure() # run whether previous steps passed or failed (skipped only on cancellation)
uses: MishaKav/pytest-coverage-comment@main
with:
pytest-coverage-path: ./test_coverage.txt
junitxml-path: ./test_output.xml
create-new-comment: true
- name: Helmfile destroy pytest
uses: helmfile/helmfile-action@vX.Y.Z
if: success() || failure() # run whether previous steps passed or failed (skipped only on cancellation)
with:
helmfile-args: |
destroy \
--environment ${{ vars.ENVIRONMENT }} \
-f ./charts/helmfiles/aries-capi-test.yaml
helm-plugins: |
https://github.com/databus23/helm-diff
helmfile-version: ${{ env.HELMFILE_VERSION }}
helm-version: ${{ env.HELM_VERSION }}
env:
IMAGE_TAG: ${{ needs.build.outputs.image_version }}
# - name: Output
# id: updated_deployments
# if: steps.update_deployments.outcome == 'success'
# run: echo "success=true" >> "$GITHUB_OUTPUT"