diff --git a/helm/didx-cloud/Chart.yaml b/helm/didx-cloud/Chart.yaml
new file mode 100644
index 000000000..58416b974
--- /dev/null
+++ b/helm/didx-cloud/Chart.yaml
@@ -0,0 +1,37 @@
+apiVersion: v2
+name: didx-cloud
+description: |
+  A generic Helm Chart to deploy the DIDx Aries CloudAPI stack.
+  Due to the similarity between the components, and to reduce duplication,
+  this chart can be used to deploy any of the following components, simply
+  by modifying the values file accordingly:
+  - endorser
+  - governance-agent
+  - governance-web
+  - ledger-browser
+  - ledger-nodes
+  - multitenant-agent
+  - multitenant-web
+  - public-web
+  - tails-server
+  - tenant-web
+  - trust-registry
+  - waypoint
+type: application
+
+version: 0.2.0
+
+home: https://github.com/didx-xyz/aries-cloudapi-python
+sources:
+  - https://github.com/didx-xyz/aries-cloudapi-python
+  - https://github.com/didx-xyz/aries-cloudcontroller-python
+  - https://github.com/bcgov/indy-tails-server
+  - https://github.com/bcgov/von-network
+
+keywords:
+  - identity
+  - hyperledger
+  - aries
+  - didx
+  - ssi
+  - self-sovereign
diff --git a/helm/didx-cloud/README.md b/helm/didx-cloud/README.md
new file mode 100644
index 000000000..1f3b92546
--- /dev/null
+++ b/helm/didx-cloud/README.md
@@ -0,0 +1 @@
+# didx:cloud Helm Chart
diff --git a/helm/didx-cloud/conf/.gitignore b/helm/didx-cloud/conf/.gitignore
new file mode 100644
index 000000000..0854956ae
--- /dev/null
+++ b/helm/didx-cloud/conf/.gitignore
@@ -0,0 +1,2 @@
+# We want to keep this local dir
+!local
diff --git a/helm/didx-cloud/conf/local/endorser.yaml b/helm/didx-cloud/conf/local/endorser.yaml
new file mode 100644
index 000000000..07a7fa72f
--- /dev/null
+++ b/helm/didx-cloud/conf/local/endorser.yaml
@@ -0,0 +1,149 @@
+fullnameOverride: endorser
+
+replicaCount: 2
+
+podAnnotations:
+  sidecar.istio.io/proxyCPU: 10m
+  ad.datadoghq.com/endorser.logs: '[{"source": "python.uvicorn", "service": "endorser"}]'
+  proxy.istio.io/config: |-
+    proxyMetadata:
+      ISTIO_META_IDLE_TIMEOUT: 0s
+podLabels:
+  admission.datadoghq.com/enabled: "true"
+
+image:
+  name: d-cloud/endorser
+  pullPolicy: Always
+  tag: master
+
+command:
+  - poetry
+  - run
+  - uvicorn
+  - endorser.main:app
+  - --log-config=/tmp/log_conf.yaml
+  - --reload
+  - --host
+  - 0.0.0.0
+  - --port
+  - 3009
+
+service:
+  name: endorser
+  port: 3009
+  containerPort: 3009
+  appProtocol: http
+
+livenessProbe:
+  httpGet:
+    path: /health/live
+    port: endorser
+readinessProbe:
+  httpGet:
+    path: /health/ready
+    port: endorser
+
+autoscaling:
+  enabled: false
+
+# resources:
+#   requests:
+#     cpu: 50m
+#     memory: 128Mi
+#   limits:
+#     cpu: 250m
+#     memory: 256Mi
+
+secretData:
+  ACAPY_GOVERNANCE_AGENT_API_KEY: adminApiKey
+
+env:
+  LOG_LEVEL: info
+  PYTHONPATH: /
+
+  ACAPY_GOVERNANCE_AGENT_URL: http://governance-agent:3021
+
+  # Trust registry
+  TRUST_REGISTRY_URL: http://trust-registry:8000
+
+  GOVERNANCE_ACAPY_LABEL: Governance
+  ENABLE_SERIALIZE_LOGS: "FALSE"
+
+podSecurityContext:
+  fsGroup: 65534
+securityContext:
+  runAsUser: 0
+
+extraVolumes:
+  - name: logs
+    emptyDir: {}
+extraVolumeMounts:
+  - name: logs
+    mountPath: /logs
+
+affinity:
+  nodeAffinity:
+    preferredDuringSchedulingIgnoredDuringExecution:
+      - weight: 100
+        preference:
+          matchExpressions:
+            - key: node.kubernetes.io/lifecycle
+              operator: In
+              values:
+                - spot
+  podAntiAffinity:
+    preferredDuringSchedulingIgnoredDuringExecution:
+      - weight: 100
+        podAffinityTerm:
+          labelSelector:
+            matchExpressions:
+              - key: app.kubernetes.io/instance
+                operator: In
+                values:
+                  - '{{ include
"didx-cloud.fullname" . }}' + topologyKey: kubernetes.io/hostname + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - '{{ include "didx-cloud.fullname" . }}' + # topologyKey: kubernetes.io/hostname + +configFiles: + log_conf.yaml: + path: /tmp/log_conf.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + default: + "()": uvicorn.logging.DefaultFormatter + format: '%(asctime)s %(name)s %(levelname)s %(message)s' + use_colors: null + access: + "()": uvicorn.logging.AccessFormatter + format: '%(asctime)s %(name)s %(levelname)s %(client_addr)s - "%(request_line)s" %(status_code)s' + handlers: + default: + formatter: default + class: logging.StreamHandler + stream: ext://sys.stderr + access: + formatter: access + class: logging.StreamHandler + stream: ext://sys.stdout + loggers: + uvicorn: + level: INFO + handlers: + - default + propagate: no + uvicorn.error: + level: INFO + uvicorn.access: + level: INFO + handlers: + - access + propagate: no diff --git a/helm/didx-cloud/conf/local/governance-agent.yaml b/helm/didx-cloud/conf/local/governance-agent.yaml new file mode 100644 index 000000000..ca0542857 --- /dev/null +++ b/helm/didx-cloud/conf/local/governance-agent.yaml @@ -0,0 +1,259 @@ +fullnameOverride: governance-agent + +replicaCount: 2 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/governance-agent.logs: '[{"source": "python", "service": "governance-agent", "auto_multi_line_detection": true}]' + +image: + name: d-cloud/governance-agent + pullPolicy: Always + tag: master + +podLabels: + admission.datadoghq.com/enabled: "false" + +command: + - aca-py + - start + - --inbound-transport + - http + - 0.0.0.0 + - 3020 + - --admin + - 0.0.0.0 + - 3021 + - --plugin + - nats_events.v1_0.nats_queue.events + - --plugin-config-value + - nats_queue.connection.connection_url="$(NATS_SERVER)" + +lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - sleep 10 + +ingressDomain: cloudapi.127.0.0.1.nip.io +ingress: + internal: + enabled: true + className: nginx + rules: + - host: governance-agent-didcomm.{{ .Values.ingressDomain}} + paths: + - path: / + port: 3020 # didcomm + internal-e2e: + enabled: true + className: nginx + rules: + - host: governance-agent.{{ .Values.ingressDomain}} + paths: + - path: / + port: 3021 # http + +service: + # if set, will run Pods on Node Network + hostNetwork: false + port: 3021 # http + containerPort: 3021 # http + appProtocol: tcp + +addPorts: + - port: 3020 # didcomm + containerPort: 3020 # didcomm + protocol: TCP + +livenessProbe: + httpGet: + path: /status/live + port: "{{ trunc 15 .Release.Name }}" + initialDelaySeconds: 300 + timeoutSeconds: 30 +readinessProbe: + httpGet: + path: /status/ready + port: "{{ trunc 15 .Release.Name }}" + initialDelaySeconds: 5 + timeoutSeconds: 30 + +# resources: +# requests: +# cpu: 100m +# memory: 256Mi +# limits: +# cpu: 500m +# memory: 512Mi + +initContainers: + - name: wait-for-ledger-browser + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://ledger-browser:8000/status -o /dev/null; do + echo "waiting for ledger-browser to be healthy" + sleep 10 + done + - name: register-did + image: curlimages/curl:latest + env: + - name: ACAPY_WALLET_SEED + valueFrom: + secretKeyRef: + name: '{{ include "didx-cloud.fullname" . 
}}-env' + key: ACAPY_WALLET_SEED + command: + - "/bin/sh" + - "-c" + - | + STATUS=$(curl -s -X POST \ + -o /dev/null \ + -w "%{http_code}" \ + -H "Content-Type: application/json" -d "{\"seed\": \"$ACAPY_WALLET_SEED\"}" \ + "http://ledger-browser:8000/register" + ) + if [ $STATUS -ne 200 ]; then + echo "Failed to register DID. Status code: $STATUS" + exit 1 + fi + +persistence: + enabled: false + +autoscaling: + enabled: false + +# Sensitive environment variables are sourced from k8s secrets: +# - generated with secretData, or +# - pre-populated with external tooling +# TODO: Helm secret logic to create new secret if not exist +secretData: + ACAPY_ADMIN_API_KEY: adminApiKey + ACAPY_LABEL: Governance + ACAPY_TENANT_AGENT_API_KEY: adminApiKey + ACAPY_WALLET_KEY: verySecretGovernanceWalletKey + ACAPY_WALLET_NAME: governance + ACAPY_WALLET_SEED: verySecretPaddedWalletSeedPadded + + ACAPY_WALLET_STORAGE_CONFIG: '{ "max_connections": 10, "min_idle_count": 10, "url": "cloudapi-postgresql:5432" }' + ACAPY_WALLET_STORAGE_CREDS: '{ "account": "governance", "admin_account": "governance", "admin_password": "governance", "password": "governance" }' + WALLET_DB_ADMIN_PASS: governance + WALLET_DB_ADMIN_USER: governance + WALLET_DB_HOST: cloudapi-postgresql + WALLET_DB_PASS: governance + WALLET_DB_PORT: 5432 + WALLET_DB_USER: governance + +env: + # NATS related + NATS_CREDS_FILE: "" # NATS in Local dev has no auth + NATS_SERVER: nats://nats:4222 + NATS_SUBJECT: cloudapi.aries.events + NATS_STREAM: cloudapi_aries_events + # for aca-py + ADMIN_URL: http://governance-agent:3021 + ACAPY_OUTBOUND_TRANSPORT: http + ACAPY_ENDPOINT: http://governance-agent:3020 + # Tails server + # Should be changed further + ACAPY_TAILS_SERVER_BASE_URL: http://tails-server:6543 + ACAPY_WALLET_TYPE: askar + ACAPY_WALLET_STORAGE_TYPE: postgres_storage + ACAPY_LOG_LEVEL: info + ACAPY_AUTO_PROVISION: true + # Ledger + ACAPY_GENESIS_URL: http://ledger-browser:8000/genesis + ACAPY_PUBLIC_INVITES: true + ACAPY_ENDORSER_ROLE: endorser + # ## DO NOT CHANGE VARIABLES BELOW + # ## Unless you know exactly what you are doing + # ## Changes will probably break CloudAPI + # Optional Helper Configurations - See https://github.com/hyperledger/aries-cloudagent-python/blob/main/aries_cloudagent/config/argparse.py + ACAPY_AUTO_ACCEPT_INVITES: false + ACAPY_AUTO_ACCEPT_REQUESTS: false + ACAPY_AUTO_PING_CONNECTION: true + ACAPY_AUTO_RESPOND_MESSAGES: false + ACAPY_AUTO_RESPOND_CREDENTIAL_PROPOSAL: false + ACAPY_AUTO_RESPOND_CREDENTIAL_OFFER: false + ACAPY_AUTO_RESPOND_CREDENTIAL_REQUEST: false + ACAPY_AUTO_RESPOND_PRESENTATION_PROPOSAL: false + ACAPY_AUTO_RESPOND_PRESENTATION_REQUEST: false + ACAPY_AUTO_STORE_CREDENTIAL: true + ACAPY_AUTO_VERIFY_PRESENTATION: true + ACAPY_PRESERVE_EXCHANGE_RECORDS: false + ACAPY_AUTO_ENDORSE_TRANSACTIONS: false + + ACAPY_ACCEPT_TAA: "[service_agreement,1.1]" + + ACAPY_REQUESTS_THROUGH_PUBLIC_DID: true + ACAPY_EMIT_NEW_DIDCOMM_PREFIX: true + ACAPY_EMIT_NEW_DIDCOMM_MIME_TYPE: true + + # ACAPY_LOG_CONFIG: /home/aries/logging_config.yaml + +affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: node.kubernetes.io/lifecycle + operator: In + values: + - spot + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - '{{ include "didx-cloud.fullname" . 
}}' + topologyKey: kubernetes.io/hostname + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - '{{ include "didx-cloud.fullname" . }}' + # topologyKey: kubernetes.io/hostname + +configFiles: + logging_config.yml: + path: /home/aries/logging_config.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + json_formatter: + (): pythonjsonlogger.jsonlogger.JsonFormatter + format: '%(asctime)s %(name)s %(levelname)s %(pathname)s:%(lineno)d %(message)s' + handlers: + stream_handler: + class: logging.StreamHandler + level: DEBUG + formatter: json_formatter + stream: ext://sys.stderr + timed_file_handler: + class: logging.handlers.TimedRotatingFileHandler + level: DEBUG + formatter: json_formatter + filename: '/home/aries/log/acapy-agent.log' + when: 'd' + interval: 7 + backupCount: 1 + loggers: + '': + level: ERROR + handlers: + - stream_handler + - timed_file_handler diff --git a/helm/didx-cloud/conf/local/governance-web.yaml b/helm/didx-cloud/conf/local/governance-web.yaml new file mode 100644 index 000000000..91f387293 --- /dev/null +++ b/helm/didx-cloud/conf/local/governance-web.yaml @@ -0,0 +1,245 @@ +fullnameOverride: governance-web + +replicaCount: 2 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/governance-web.logs: '[{"source": "python.uvicorn", "service": "governance-web"}]' + +image: + name: d-cloud/governance-web + pullPolicy: Always + tag: master + +command: + - poetry + - run + - uvicorn + - app.main:app + - --log-config=/tmp/log_conf.yaml + - --reload + - --host + - 0.0.0.0 + - --port + - 8000 + +ingressDomain: cloudapi.127.0.0.1.nip.io +ingress: + internal: + enabled: true + className: nginx + rules: + - host: governance-web.{{ .Values.ingressDomain }} + paths: + - path: /governance + cloudapi-internal: + enabled: true + className: nginx + annotations: + # Retool needs the below + nginx.ingress.kubernetes.io/cors-allow-headers: x-api-key + nginx.ingress.kubernetes.io/enable-cors: "true" + rules: + - host: '{{ .Values.ingressDomain }}' + paths: + - path: /governance + +service: + # if set, will run Pods on Node Network + hostNetwork: false + port: 8000 + containerPort: 8000 + appProtocol: http + +livenessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" +readinessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" + +# resources: +# requests: +# cpu: 50m +# memory: 256Mi +# limits: +# cpu: 250m +# memory: 256Mi + +initContainers: + - name: wait-governance-agent + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://governance-agent:3020 -o /dev/null; do + echo "waiting for governance-agent to be healthy" + sleep 2 + done + - name: wait-governance-multitenant-agent + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://multitenant-agent:3020; do + echo "waiting for multitenant-agent to be healthy" + sleep 2 + done + +persistence: + enabled: false + +autoscaling: + enabled: false + + +podLabels: + admission.datadoghq.com/enabled: "true" + sidecar.istio.io/inject: "true" + +secretData: + ACAPY_GOVERNANCE_AGENT_API_KEY: adminApiKey + +env: + ACAPY_GOVERNANCE_AGENT_URL: http://governance-agent:3021 + ACAPY_TENANT_AGENT_URL: http://multitenant-agent:3021 + TRUST_REGISTRY_URL: http://trust-registry:8000 + OPENAPI_NAME: CloudAPI Governance + PYTHONPATH: / + ACAPY_ENDORSER_ALIAS: endorser + ROLE: governance + ROOT_PATH: /governance + 
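  # Reviewer note: the deployment template in this chart renders plain env
  # values through Helm's `tpl` (see templates/deployment.yaml later in this
  # diff), so entries in this map may themselves contain template
  # expressions. To inspect what actually lands in the pod spec, something
  # like the following should work (chart path and values file are assumed
  # from this diff's layout):
  #   helm template governance-web ./helm/didx-cloud \
  #     -f ./helm/didx-cloud/conf/local/governance-web.yaml \
  #     --show-only templates/deployment.yaml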
ENABLE_SERIALIZE_LOGS: "FALSE" + +podSecurityContext: + fsGroup: 65534 +securityContext: + runAsUser: 0 + +extraVolumes: + - name: logs + emptyDir: {} +extraVolumeMounts: + - name: logs + mountPath: /logs + +lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - sleep 15 + +affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: node.kubernetes.io/lifecycle + operator: In + values: + - spot + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - '{{ include "didx-cloud.fullname" . }}' + topologyKey: kubernetes.io/hostname + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - '{{ include "didx-cloud.fullname" . }}' + # topologyKey: kubernetes.io/hostname + +configFiles: + log_conf.yaml: + path: /tmp/log_conf.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + default: + "()": uvicorn.logging.DefaultFormatter + format: '%(asctime)s %(name)s %(levelname)s %(message)s' + use_colors: null + access: + "()": uvicorn.logging.AccessFormatter + format: '%(asctime)s %(name)s %(levelname)s %(client_addr)s - "%(request_line)s" %(status_code)s' + handlers: + default: + formatter: default + class: logging.StreamHandler + stream: ext://sys.stderr + access: + formatter: access + class: logging.StreamHandler + stream: ext://sys.stdout + loggers: + uvicorn: + level: INFO + handlers: + - default + propagate: no + uvicorn.error: + level: INFO + uvicorn.access: + level: INFO + handlers: + - access + propagate: no + +istio: + auth: + authn: # Configures cloudapi Keycloak realm as JWT issuer for governance + enabled: false + conf: + jwtRules: + - issuer: http://cloudapi.127.0.0.1.nip.io/auth/realms/cloudapi + authz: # Authorisation config + enabled: false + conf: + rules: + - to: + - operation: + methods: ["GET"] + paths: + - /governance/docs + - /governance/docs/* + - /governance/openapi.json + - when: + - key: request.auth.claims[iss] + values: + - http://cloudapi.127.0.0.1.nip.io/auth/realms/cloudapi + - key: request.auth.claims[client_id] + values: + - governance + to: + - operation: + methods: ["*"] + paths: + - /governance + - /governance/* + - when: # exclude internal admin url from Istio authN, i.e., can use it unauthenticated internally + - key: request.headers[host] + values: + - governance-web.cloudapi.127.0.0.1.nip.io + to: + - operation: + methods: ["*"] + paths: + - /governance + - /governance/* diff --git a/helm/didx-cloud/conf/local/ledger-browser.yaml b/helm/didx-cloud/conf/local/ledger-browser.yaml new file mode 100644 index 000000000..0ecc25352 --- /dev/null +++ b/helm/didx-cloud/conf/local/ledger-browser.yaml @@ -0,0 +1,71 @@ +fullnameOverride: ledger-browser + +replicaCount: 1 + +strategy: + type: Recreate + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/ledger-browser.logs: '[{"source": "grok.ledger-browser", "service": "ledger-browser", "auto_multi_line_detection": true}]' + +podLabels: + admission.datadoghq.com/enabled: "true" + +image: + name: ledger-browser + pullPolicy: Always + tag: master + +ingressDomain: cloudapi.127.0.0.1.nip.io +ingress: + internal: + enabled: true + className: nginx + rules: + - host: ledger-browser.{{ .Values.ingressDomain }} + paths: + - path: / + port: 8000 + 
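# Reviewer note: the ingress host above is itself templated against
# .Values.ingressDomain, so with the default domain it renders to
# ledger-browser.cloudapi.127.0.0.1.nip.io (nip.io resolves that name to
# 127.0.0.1). Assuming an nginx ingress controller is listening on
# localhost, a quick check against the status endpoint used by the probes:
#   curl -s http://ledger-browser.cloudapi.127.0.0.1.nip.io/status/text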
+service: + hostNetwork: false + port: 8000 + containerPort: 8000 + appProtocol: tcp + +command: + - "bash" + - "-c" + - "cp ./config/sample_aml.json ./config/aml.json; cp ./config/sample_taa.json ./config/taa.json; ./scripts/start_webserver.sh" + +livenessProbe: + httpGet: + path: /status/text + port: "{{ trunc 15 .Release.Name }}" + timeoutSeconds: 10 +readinessProbe: + httpGet: + path: /status/text + port: "{{ trunc 15 .Release.Name }}" + +# resources: +# requests: +# cpu: 100m +# memory: 386Mi +# limits: +# cpu: 500m +# memory: 386Mi + +secretData: + LEDGER_SEED: 000000000000000000000000Trustee1 + +env: + MAX_FETCH: "50000" + RESYNC_TIME: "120" + REGISTER_NEW_DIDS: "True" + LEDGER_INSTANCE_NAME: Indy Ledger Browser + LOG_LEVEL: info + +extraConfigmapNamesForEnvFrom: + - ips-configmap diff --git a/helm/didx-cloud/conf/local/mediator.yaml b/helm/didx-cloud/conf/local/mediator.yaml new file mode 100644 index 000000000..2f824541c --- /dev/null +++ b/helm/didx-cloud/conf/local/mediator.yaml @@ -0,0 +1,242 @@ +fullnameOverride: mediator + +replicaCount: 1 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/mediator.logs: '[{"source": "python", "service": "mediator", "auto_multi_line_detection": true}]' + +image: + registry: ghcr.io/hyperledger + name: aries-cloudagent-python + pullPolicy: Always + tag: py3.12-1.0.0 + +podLabels: + admission.datadoghq.com/enabled: "false" + +command: + - aca-py + - start + - --inbound-transport + - http + - 0.0.0.0 + - 3000 + - --inbound-transport + - ws + - 0.0.0.0 + - 3001 + - --outbound-transport + - ws + - --outbound-transport + - http + - --admin + - 0.0.0.0 + - 3002 + - --endpoint + - http://mediator.{{ .Values.ingressDomain }} + - ws://mediator.{{ .Values.ingressDomain }} + +ingressDomain: cloudapi.127.0.0.1.nip.io +ingress: + internal: + enabled: true + className: nginx + annotations: + # Websockets config + # https://kubernetes.github.io/ingress-nginx/user-guide/miscellaneous/#websockets + # 1 hour proxy read/write timeout + nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" + nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" + nginx.ingress.kubernetes.io/configuration-snippet: | + if ($http_connection ~* "upgrade") { + proxy_pass http://mediator.{{ .Release.Namespace }}.svc.cluster.local:3001; + break; + } + rules: + - host: mediator.{{ .Values.ingressDomain }} + paths: + - path: / + port: 3000 + internal-admin: + enabled: false + className: nginx + rules: + - host: mediator-admin.{{ .Values.ingressDomain }} + paths: + - path: / + port: 3002 + +service: + port: 3000 + containerPort: 3000 + appProtocol: http + +addPorts: + - name: websocket + port: 3001 + containerPort: 3001 + protocol: TCP + appProtocol: http + - name: admin + port: 3002 + containerPort: 3002 + protocol: TCP + appProtocol: http + +livenessProbe: + httpGet: + path: /status/live + port: admin + initialDelaySeconds: 300 + timeoutSeconds: 30 +readinessProbe: + httpGet: + path: /status/ready + port: admin + initialDelaySeconds: 5 + timeoutSeconds: 30 + +# resources: +# requests: +# cpu: 100m +# memory: 256Mi +# limits: +# cpu: 250m +# memory: 384Mi + +initContainers: + - name: wait-governance-agent + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://governance-agent:3020 -o /dev/null; do + echo "waiting for governance-agent to be healthy" + sleep 2 + done + - name: wait-multitenant-agent + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://multitenant-agent:3020; do + echo "waiting for multitenant-agent to 
be healthy" + sleep 2 + done + +persistence: + enabled: false + +autoscaling: + enabled: false + +# Sensitive environment variables are sourced from k8s secrets: +# - generated with secretData, or +# - pre-populated with external tooling +# TODO: Helm secret logic to create new secret if not exist +secretData: + ACAPY_ADMIN_API_KEY: adminApiKey + ACAPY_WALLET_KEY: verySecureMediatorWalletKey + + ACAPY_WALLET_STORAGE_CONFIG: '{ "max_connections":10, "min_idle_count":10, "url":"cloudapi-postgresql:5432" }' + ACAPY_WALLET_STORAGE_CREDS: '{ "account":"mediator", "admin_account":"mediator", "admin_password":"mediator", "password":"mediator" }' + WALLET_DB_ADMIN_PASS: mediator + WALLET_DB_ADMIN_USER: mediator + WALLET_DB_HOST: cloudapi-postgresql + WALLET_DB_PASS: mediator + WALLET_DB_PORT: 5432 + WALLET_DB_USER: mediator + +env: + ACAPY_LABEL: Aca-Py Mediator + ACAPY_WALLET_NAME: mediator + # Mediator does not use a ledger + ACAPY_NO_LEDGER: true + # Wallet + ACAPY_WALLET_TYPE: askar + ACAPY_AUTO_PROVISION: true + # Mediation + ACAPY_MEDIATION_OPEN: true + ACAPY_ENABLE_UNDELIVERED_QUEUE: true + # Connections + ACAPY_DEBUG_CONNECTIONS: true + ACAPY_AUTO_ACCEPT_INVITES: true + ACAPY_AUTO_ACCEPT_REQUESTS: true + ACAPY_AUTO_PING_CONNECTION: true + # Print admin invite + # ACAPY_MEDIATION_CONNECTIONS_INVITE: true + ACAPY_INVITE_LABEL: Aca-Py Mediator + ACAPY_INVITE_MULTI_USE: true + ACAPY_CONNECTIONS_INVITE: true + + ACAPY_WALLET_STORAGE_TYPE: postgres_storage + ACAPY_LOG_LEVEL: info + + ACAPY_EMIT_NEW_DIDCOMM_PREFIX: true + ACAPY_EMIT_NEW_DIDCOMM_MIME_TYPE: true + + # ACAPY_LOG_CONFIG: /home/aries/logging_config.yaml + +affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: node.kubernetes.io/lifecycle + operator: In + values: + - spot + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - '{{ include "didx-cloud.fullname" . }}' + topologyKey: kubernetes.io/hostname + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - '{{ include "didx-cloud.fullname" . 
}}' + # topologyKey: kubernetes.io/hostname + +configFiles: + logging_config.yml: + path: /home/aries/logging_config.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + json_formatter: + (): pythonjsonlogger.jsonlogger.JsonFormatter + format: '%(asctime)s %(name)s %(levelname)s %(pathname)s:%(lineno)d %(message)s' + handlers: + stream_handler: + class: logging.StreamHandler + level: DEBUG + formatter: json_formatter + stream: ext://sys.stderr + timed_file_handler: + class: logging.handlers.TimedRotatingFileHandler + level: DEBUG + formatter: json_formatter + filename: '/home/aries/log/acapy-agent.log' + when: 'd' + interval: 7 + backupCount: 1 + loggers: + '': + level: ERROR + handlers: + - stream_handler + - timed_file_handler diff --git a/helm/didx-cloud/conf/local/multitenant-agent.yaml b/helm/didx-cloud/conf/local/multitenant-agent.yaml new file mode 100644 index 000000000..c0d8c8931 --- /dev/null +++ b/helm/didx-cloud/conf/local/multitenant-agent.yaml @@ -0,0 +1,256 @@ +fullnameOverride: multitenant-agent + +# Because we don't have a RWX StorageClass in the local Kind cluster +replicaCount: 1 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/multitenant-agent.logs: '[{"source": "python", "service": "multitenant-agent", "auto_multi_line_detection": true}]' + +image: + name: d-cloud/multitenant-agent + tag: master + pullPolicy: Always + +podLabels: + admission.datadoghq.com/enabled: "false" + +command: + - aca-py + - start + - --inbound-transport + - http + - 0.0.0.0 + - 3020 + - --admin + - 0.0.0.0 + - 3021 + - --plugin + - acapy_wallet_groups_plugin + - --auto-promote-author-did + - --plugin + - nats_events.v1_0.nats_queue.events + - --plugin-config-value + - nats_queue.connection.connection_url="$(NATS_SERVER)" + +lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - sleep 10 + +ingressDomain: cloudapi.127.0.0.1.nip.io +ingress: + internal: + enabled: true + className: nginx + rules: + - host: multitenant-agent-didcomm.{{ .Values.ingressDomain }} + paths: + - path: / + port: 3020 + internal-e2e: + enabled: true + className: nginx + rules: + - host: multitenant-agent.{{ .Values.ingressDomain }} + paths: + - path: / + port: 3021 + +service: + # if set, will run Pods on Node Network + hostNetwork: false + port: 3021 + containerPort: 3021 + appProtocol: tcp + +addPorts: + - port: 3020 + containerPort: 3020 + protocol: TCP + +livenessProbe: + httpGet: + path: /status/live + port: "{{ trunc 15 .Release.Name }}" + initialDelaySeconds: 300 + timeoutSeconds: 30 +readinessProbe: + httpGet: + path: /status/ready + port: "{{ trunc 15 .Release.Name }}" + initialDelaySeconds: 5 + timeoutSeconds: 30 + +# resources: +# requests: +# cpu: 100m +# memory: 256Mi +# limits: +# cpu: 500m +# memory: 512Mi + +initContainers: + - name: wait-for-ledger-browser + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://ledger-browser:8000/status -o /dev/null; do + echo "waiting for ledger-browser to be healthy" + sleep 2 + done + +persistence: + enabled: true + mountPath: /home/aries/.indy_client + capacity: 1Gi + storageClassName: standard + accessMode: ReadWriteOnce + +autoscaling: + enabled: false + +# Sensitive environment variables are sourced from k8s secrets: +# - generated with secretData, or +# - pre-populated with external tooling +# TODO: Helm secret logic to create new secret if not exist +secretData: + ACAPY_WALLET_KEY: verySecretMultitenantWalletKey + ACAPY_ADMIN_API_KEY: adminApiKey + ACAPY_MULTITENANT_JWT_SECRET: 
verySecretMultitenantJwtSecret + ACAPY_GOVERNANCE_AGENT_API_KEY: adminApiKey + ACAPY_LABEL: Multitenant + ACAPY_WALLET_NAME: multitenant + ACAPY_MULTITENANCY_CONFIGURATION: '{ "wallet_type":"single-wallet-askar", "wallet_name":"multitenant" }' + + ACAPY_WALLET_STORAGE_CONFIG: '{ "max_connections":10, "min_idle_count":10, "url":"cloudapi-postgresql:5432" }' + ACAPY_WALLET_STORAGE_CREDS: '{ "account":"multitenant", "admin_account":"multitenant", "admin_password":"multitenant", "password":"multitenant" }' + WALLET_DB_ADMIN_PASS: multitenant + WALLET_DB_ADMIN_USER: multitenant + WALLET_DB_HOST: cloudapi-postgresql + WALLET_DB_PASS: multitenant + WALLET_DB_PORT: 5432 + WALLET_DB_USER: multitenant + +env: + # NATS related + NATS_CREDS_FILE: "" # NATS in Local dev has no auth + NATS_SERVER: nats://nats:4222 + NATS_SUBJECT: cloudapi.aries.events + NATS_STREAM: cloudapi_aries_events + # for aca-py + ADMIN_URL: http://multitenant-agent:3021 + ACAPY_OUTBOUND_TRANSPORT: http + ACAPY_ADMIN: "[0.0.0.0,3021]" + ACAPY_ENDPOINT: http://multitenant-agent:3020 + # Tails server + ACAPY_TAILS_SERVER_BASE_URL: http://tails-server:6543 + + ACAPY_WALLET_TYPE: askar + ACAPY_WALLET_STORAGE_TYPE: postgres_storage + ACAPY_LOG_LEVEL: info + ACAPY_AUTO_PROVISION: true + # Ledger + ACAPY_GENESIS_URL: http://ledger-browser:8000/genesis + + # Multi-tenant Configuration + ACAPY_MULTITENANT: true + ACAPY_MULTITENANT_ADMIN: false + ACAPY_PUBLIC_INVITES: true + # ## DO NOT CHANGE VARIABLES BELOW + # ## Unless you know exactly what you are doing + # ## Changes will probably break CloudAPI + # Optional Helper Configurations - See https://github.com/hyperledger/aries-cloudagent-python/blob/main/aries_cloudagent/config/argparse.py + ACAPY_AUTO_ACCEPT_INVITES: true + ACAPY_AUTO_ACCEPT_REQUESTS: true + ACAPY_AUTO_PING_CONNECTION: true + ACAPY_AUTO_RESPOND_MESSAGES: false + ACAPY_AUTO_RESPOND_CREDENTIAL_PROPOSAL: false + ACAPY_AUTO_RESPOND_CREDENTIAL_OFFER: false + ACAPY_AUTO_RESPOND_CREDENTIAL_REQUEST: false + ACAPY_AUTO_RESPOND_PRESENTATION_PROPOSAL: false + ACAPY_AUTO_RESPOND_PRESENTATION_REQUEST: false + ACAPY_AUTO_STORE_CREDENTIAL: true + ACAPY_AUTO_VERIFY_PRESENTATION: true + ACAPY_PRESERVE_EXCHANGE_RECORDS: false + ACAPY_CREATE_REVOCATION_TRANSACTIONS: true + # Endorser + ACAPY_ENDORSER_ROLE: author + ACAPY_AUTO_REQUEST_ENDORSEMENT: true + ACAPY_AUTO_WRITE_TRANSACTIONS: true + ACAPY_ENDORSER_ALIAS: endorser + + ACAPY_REQUESTS_THROUGH_PUBLIC_DID: true + ACAPY_EMIT_NEW_DIDCOMM_PREFIX: true + ACAPY_EMIT_NEW_DIDCOMM_MIME_TYPE: true + + # ## From mt-agent-env secret + # ACAPY_MULTITENANCY_CONFIGURATION: '{"wallet_type":"askar-profile","wallet_name":"xxx"}' + + # ACAPY_LOG_CONFIG: /home/aries/logging_config.yaml + +affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: node.kubernetes.io/lifecycle + operator: In + values: + - spot + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - '{{ include "didx-cloud.fullname" . }}' + topologyKey: kubernetes.io/hostname + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - '{{ include "didx-cloud.fullname" . 
}}' + # topologyKey: kubernetes.io/hostname + +configFiles: + logging_config.yml: + path: /home/aries/logging_config.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + json_formatter: + (): pythonjsonlogger.jsonlogger.JsonFormatter + format: '%(asctime)s %(name)s %(wallet_id)s %(levelname)s %(pathname)s:%(lineno)d %(message)s' + handlers: + stream_handler: + class: logging.StreamHandler + level: DEBUG + formatter: json_formatter + stream: ext://sys.stderr + timed_file_handler: + class: logging.handlers.TimedRotatingFileHandler + level: DEBUG + formatter: json_formatter + filename: '/home/aries/log/acapy-agent.log' + when: 'd' + interval: 7 + backupCount: 1 + loggers: + '': + level: ERROR + handlers: + - stream_handler + - timed_file_handler diff --git a/helm/didx-cloud/conf/local/multitenant-web.yaml b/helm/didx-cloud/conf/local/multitenant-web.yaml new file mode 100644 index 000000000..5d645e5d5 --- /dev/null +++ b/helm/didx-cloud/conf/local/multitenant-web.yaml @@ -0,0 +1,204 @@ +fullnameOverride: multitenant-web + +replicaCount: 2 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/multitenant-web.logs: '[{"source": "python.uvicorn", "service": "multitenant-web"}]' + +image: + name: d-cloud/multitenant-web + pullPolicy: Always + tag: master + +command: + - poetry + - run + - uvicorn + - app.main:app + - --log-config=/tmp/log_conf.yaml + - --reload + - --host + - 0.0.0.0 + - --port + - 8000 + +ingressDomain: cloudapi.127.0.0.1.nip.io +ingress: + internal: + enabled: true + className: nginx + rules: + - host: multitenant-web.{{ .Values.ingressDomain }} + paths: + - path: /tenant-admin + cloudapi-internal: + enabled: true + className: nginx + annotations: + # Retool needs the below + nginx.ingress.kubernetes.io/cors-allow-headers: x-api-key + nginx.ingress.kubernetes.io/enable-cors: "true" + rules: + - host: '{{ .Values.ingressDomain }}' + paths: + - path: /tenant-admin + +service: + # if set, will run Pods on Node Network + hostNetwork: false + port: 8000 + containerPort: 8000 + appProtocol: http + +livenessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" +readinessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" + +# resources: +# requests: +# cpu: 50m +# memory: 256Mi +# limits: +# cpu: 250m +# memory: 256Mi + +initContainers: + - name: wait-governance-agent + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://governance-agent:3020 -o /dev/null; do + echo "waiting for governance-agent to be healthy" + sleep 2 + done + - name: wait-multitenant-agent + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://multitenant-agent:3020; do + echo "waiting for multitenant-agent to be healthy" + sleep 2 + done + +persistence: + enabled: false + +autoscaling: + enabled: false + +podLabels: + admission.datadoghq.com/enabled: "true" + sidecar.istio.io/inject: "true" + +secretData: + ACAPY_MULTITENANT_JWT_SECRET: verySecretMultitenantJwtSecret + ACAPY_TENANT_AGENT_API_KEY: adminApiKey + +env: + ACAPY_GOVERNANCE_AGENT_URL: http://governance-agent:3021 + ACAPY_TENANT_AGENT_URL: http://multitenant-agent:3021 + TRUST_REGISTRY_URL: http://trust-registry:8000 + OPENAPI_NAME: CloudAPI Multitenant Admin + PYTHONPATH: / + ACAPY_ENDORSER_ALIAS: endorser + ROLE: tenant-admin + ROOT_PATH: /tenant-admin + ENABLE_SERIALIZE_LOGS: "FALSE" + GOVERNANCE_ACAPY_LABEL: Governance + +podSecurityContext: + fsGroup: 65534 +securityContext: + runAsUser: 0 + +extraVolumes: + - name: logs 
+ emptyDir: {} +extraVolumeMounts: + - name: logs + mountPath: /logs + +lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - sleep 15 + +affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: node.kubernetes.io/lifecycle + operator: In + values: + - spot + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - '{{ include "didx-cloud.fullname" . }}' + topologyKey: kubernetes.io/hostname + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - '{{ include "didx-cloud.fullname" . }}' + # topologyKey: kubernetes.io/hostname + +configFiles: + log_conf.yaml: + path: /tmp/log_conf.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + default: + "()": uvicorn.logging.DefaultFormatter + format: '%(asctime)s %(name)s %(levelname)s %(message)s' + use_colors: null + access: + "()": uvicorn.logging.AccessFormatter + format: '%(asctime)s %(name)s %(levelname)s %(client_addr)s - "%(request_line)s" %(status_code)s' + handlers: + default: + formatter: default + class: logging.StreamHandler + stream: ext://sys.stderr + access: + formatter: access + class: logging.StreamHandler + stream: ext://sys.stdout + loggers: + uvicorn: + level: INFO + handlers: + - default + propagate: no + uvicorn.error: + level: INFO + uvicorn.access: + level: INFO + handlers: + - access + propagate: no diff --git a/helm/didx-cloud/conf/local/public-web.yaml b/helm/didx-cloud/conf/local/public-web.yaml new file mode 100644 index 000000000..b45b70407 --- /dev/null +++ b/helm/didx-cloud/conf/local/public-web.yaml @@ -0,0 +1,203 @@ +fullnameOverride: public-web + +replicaCount: 2 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/public-web.logs: '[{"source": "python.uvicorn", "service": "public-web"}]' + +image: + name: d-cloud/public-web + pullPolicy: Always + tag: master + +command: + - poetry + - run + - uvicorn + - app.main:app + - --log-config=/tmp/log_conf.yaml + - --reload + - --host + - 0.0.0.0 + - --port + - 8000 + +ingressDomain: cloudapi.127.0.0.1.nip.io +ingress: + internal: + enabled: true + className: nginx + rules: + - host: public-web.{{ .Values.ingressDomain }} + paths: + - path: /public + cloudapi-internal: + enabled: true + className: nginx + annotations: + # Retool needs the below + nginx.ingress.kubernetes.io/cors-allow-headers: x-api-key + nginx.ingress.kubernetes.io/enable-cors: "true" + rules: + - host: '{{ .Values.ingressDomain }}' + paths: + - path: /public + +service: + # if set, will run Pods on Node Network + hostNetwork: false + port: 8000 + containerPort: 8000 + appProtocol: http + +livenessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" +readinessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" + +# resources: +# requests: +# cpu: 50m +# memory: 256Mi +# limits: +# cpu: 250m +# memory: 256Mi + +initContainers: + - name: wait-governance-agent + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://governance-agent:3020 -o /dev/null; do + echo "waiting for governance-agent to be healthy" + sleep 2 + done + - name: wait-multitenant-agent + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://multitenant-agent:3020; do + 
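          # reviewer note: this wait-loop blocks pod start until the agent's
          # DIDComm port (3020) answers; unlike the governance check above,
          # this curl omits -o /dev/null, so the response body is echoed into
          # the init container's log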
echo "waiting for multitenant-agent to be healthy" + sleep 2 + done + +persistence: + enabled: false + +autoscaling: + enabled: false + + +podLabels: + admission.datadoghq.com/enabled: "true" + +extraSecretNamesForEnvFrom: {} + +secretData: {} + +env: + # ACAPY_GOVERNANCE_AGENT_URL: http://governance-agent:3021 + # ACAPY_TENANT_AGENT_URL: http://multitenant-agent:3021 + TRUST_REGISTRY_URL: http://trust-registry:8000 + OPENAPI_NAME: CloudAPI Public + PYTHONPATH: / + # ACAPY_ENDORSER_ALIAS: endorser + ROLE: public + ROOT_PATH: /public + ENABLE_SERIALIZE_LOGS: "FALSE" + +podSecurityContext: + fsGroup: 65534 +securityContext: + runAsUser: 0 + +extraVolumes: + - name: logs + emptyDir: {} +extraVolumeMounts: + - name: logs + mountPath: /logs + +lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - sleep 15 + +affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: node.kubernetes.io/lifecycle + operator: In + values: + - spot + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - '{{ include "didx-cloud.fullname" . }}' + topologyKey: kubernetes.io/hostname + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - '{{ include "didx-cloud.fullname" . }}' + # topologyKey: kubernetes.io/hostname + +configFiles: + log_conf.yaml: + path: /tmp/log_conf.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + default: + "()": uvicorn.logging.DefaultFormatter + format: '%(asctime)s %(name)s %(levelname)s %(message)s' + use_colors: null + access: + "()": uvicorn.logging.AccessFormatter + format: '%(asctime)s %(name)s %(levelname)s %(client_addr)s - "%(request_line)s" %(status_code)s' + handlers: + default: + formatter: default + class: logging.StreamHandler + stream: ext://sys.stderr + access: + formatter: access + class: logging.StreamHandler + stream: ext://sys.stdout + loggers: + uvicorn: + level: INFO + handlers: + - default + propagate: no + uvicorn.error: + level: INFO + uvicorn.access: + level: INFO + handlers: + - access + propagate: no diff --git a/helm/didx-cloud/conf/local/tails-server.yaml b/helm/didx-cloud/conf/local/tails-server.yaml new file mode 100644 index 000000000..09bfe85cd --- /dev/null +++ b/helm/didx-cloud/conf/local/tails-server.yaml @@ -0,0 +1,72 @@ +fullnameOverride: tails-server + +# Because we don't have a RWX StorageClass in the local Kind cluster +replicaCount: 1 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/tails-server.logs: '[{"source": "python.acapy", "service": "tails-server"}]' +podLabels: + admission.datadoghq.com/enabled: "true" + +image: + name: d-cloud/tails-server + # registry: ghcr.io/bcgov + # tag: 1.1 + pullPolicy: Always + tag: master + +ingressDomain: cloudapi.127.0.0.1.nip.io +ingress: + internal: + enabled: true + className: nginx + rules: + - host: tails-server.{{ .Values.ingressDomain }} + paths: + - path: / + port: 6543 + +args: + - tails-server + - --host + - 0.0.0.0 + - --port + - 6543 + - --storage-path + - /tails-server-db + - --log-level + - INFO + +service: + port: 6543 + containerPort: 6543 + appProtocol: tcp + +livenessProbe: + tcpSocket: + port: "{{ trunc 15 .Release.Name }}" +readinessProbe: + tcpSocket: + port: "{{ trunc 15 .Release.Name }}" + 
initialDelaySeconds: 5 + +persistence: + enabled: true + mountPath: /tails-server-db + capacity: 10Gi + storageClassName: standard + accessMode: ReadWriteOnce + +podSecurityContext: + fsGroup: 65534 +securityContext: + runAsUser: 65534 + +# resources: +# requests: +# cpu: 50m +# memory: 128Mi +# limits: +# cpu: 250m +# memory: 256Mi diff --git a/helm/didx-cloud/conf/local/tenant-web.yaml b/helm/didx-cloud/conf/local/tenant-web.yaml new file mode 100644 index 000000000..2c2ca413b --- /dev/null +++ b/helm/didx-cloud/conf/local/tenant-web.yaml @@ -0,0 +1,212 @@ +fullnameOverride: tenant-web + +replicaCount: 2 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/tenant-web.logs: '[{"source": "python.uvicorn", "service": "tenant-web"}]' + +image: + name: d-cloud/tenant-web + pullPolicy: Always + tag: master + +command: + - poetry + - run + - uvicorn + - app.main:app + - --log-config=/tmp/log_conf.yaml + - --reload + - --host + - 0.0.0.0 + - --port + - 8000 + +ingressDomain: cloudapi.127.0.0.1.nip.io +ingress: + internal: + enabled: true + className: nginx + rules: + - host: tenant-web.{{ .Values.ingressDomain }} + paths: + - path: /tenant + cloudapi-internal: + enabled: true + className: nginx + annotations: + # Retool needs the below + nginx.ingress.kubernetes.io/cors-allow-headers: x-api-key + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/client-body-timeout: "120" + nginx.ingress.kubernetes.io/proxy-connect-timeout: "120" + nginx.ingress.kubernetes.io/proxy-read-timeout: "120" + nginx.ingress.kubernetes.io/proxy-send-timeout: "120" + rules: + - host: '{{ .Values.ingressDomain }}' + paths: + - path: /tenant + +service: + # if set, will run Pods on Node Network + hostNetwork: false + port: 8000 + containerPort: 8000 + appProtocol: http + +livenessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" +readinessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" + +# resources: +# requests: +# cpu: 50m +# memory: 256Mi +# limits: +# cpu: 250m +# memory: 256Mi + +initContainers: + - name: wait-governance-agent + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://governance-agent:3020 -o /dev/null; do + echo "waiting for governance-agent to be healthy" + sleep 2 + done + - name: wait-multitenant-agent + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://multitenant-agent:3020; do + echo "waiting for multitenant-agent to be healthy" + sleep 2 + done + +persistence: + enabled: false + +autoscaling: + enabled: false + +secretData: + ACAPY_GOVERNANCE_AGENT_API_KEY: adminApiKey + ACAPY_MULTITENANT_JWT_SECRET: verySecretMultitenantJwtSecret + ACAPY_TENANT_AGENT_API_KEY: adminApiKey # This is, potentially, not needed + +podLabels: + admission.datadoghq.com/enabled: "true" + +env: + ACAPY_GOVERNANCE_AGENT_URL: http://governance-agent:3021 + ACAPY_TENANT_AGENT_URL: http://multitenant-agent:3021 + TRUST_REGISTRY_URL: http://trust-registry:8000 + OPENAPI_NAME: CloudAPI Tenant + PYTHONPATH: / + ACAPY_ENDORSER_ALIAS: endorser + ROLE: tenant + ROOT_PATH: /tenant + ACAPY_TAILS_SERVER_BASE_URL: http://tails-server:6543 + ENABLE_SERIALIZE_LOGS: "FALSE" + GOVERNANCE_ACAPY_LABEL: Governance + REGISTRY_CREATION_TIMEOUT: 120 + REGISTRY_SIZE: 100 + WAYPOINT_URL: http://waypoint:3010 + +podSecurityContext: + fsGroup: 65534 +securityContext: + runAsUser: 0 + +extraVolumes: + - name: logs + emptyDir: {} +extraVolumeMounts: + - name: logs + mountPath: /logs + +lifecycle: + preStop: + 
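    # reviewer note: the preStop sleep keeps the container alive while
    # endpoint deregistration propagates, letting in-flight requests drain
    # before SIGTERM arrives; the other web components in this diff use the
    # same pattern with sleeps of 5-15s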
exec: + command: + - /bin/sh + - -c + - sleep 15 + +affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: node.kubernetes.io/lifecycle + operator: In + values: + - spot + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - '{{ include "didx-cloud.fullname" . }}' + topologyKey: kubernetes.io/hostname + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - '{{ include "didx-cloud.fullname" . }}' + # topologyKey: kubernetes.io/hostname + +configFiles: + log_conf.yaml: + path: /tmp/log_conf.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + default: + "()": uvicorn.logging.DefaultFormatter + format: '%(asctime)s %(name)s %(levelname)s %(message)s' + use_colors: null + access: + "()": uvicorn.logging.AccessFormatter + format: '%(asctime)s %(name)s %(levelname)s %(client_addr)s - "%(request_line)s" %(status_code)s' + handlers: + default: + formatter: default + class: logging.StreamHandler + stream: ext://sys.stderr + access: + formatter: access + class: logging.StreamHandler + stream: ext://sys.stdout + loggers: + uvicorn: + level: INFO + handlers: + - default + propagate: no + uvicorn.error: + level: INFO + uvicorn.access: + level: INFO + handlers: + - access + propagate: no diff --git a/helm/didx-cloud/conf/local/trust-registry.yaml b/helm/didx-cloud/conf/local/trust-registry.yaml new file mode 100644 index 000000000..4b31738e2 --- /dev/null +++ b/helm/didx-cloud/conf/local/trust-registry.yaml @@ -0,0 +1,163 @@ +fullnameOverride: trust-registry + +replicaCount: 2 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/trust-registry.logs: '[{"source": "python.uvicorn", "service": "trust-registry"}]' +podLabels: + admission.datadoghq.com/enabled: "true" + +image: + name: d-cloud/trust-registry + pullPolicy: Always + tag: master + +command: + - poetry + - run + - uvicorn + - trustregistry.main:app + - --log-config=/tmp/log_conf.yaml + - --reload + - --host + - 0.0.0.0 + - --port + - 8000 + +lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - sleep 5 + +ingressDomain: cloudapi.127.0.0.1.nip.io +ingress: + internal: + enabled: true + className: nginx + rules: + - host: trust-registry.{{ .Values.ingressDomain }} + paths: + - path: / + port: 8000 + +service: + appProtocol: tcp + hostNetwork: false + port: 8000 + containerPort: 8000 + +livenessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" +readinessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" + +# resources: +# requests: +# cpu: 50m +# memory: 256Mi +# limits: +# cpu: 250m +# memory: 256Mi + +autoscaling: + enabled: false + +secretData: + POSTGRES_DATABASE_URL: postgresql://trust-registry:trust-registry@cloudapi-postgresql:5432/trust-registry?sslmode=prefer + +env: + PYTHONPATH: / + OPENAPI_NAME: Trustregistry + LOG_LEVEL: warning + POSTGRES_POOL_SIZE: 10 + POSTGRES_MAX_OVERFLOW: 20 + POSTGRES_POOL_RECYCLE: 600 # 10 minutes + POSTGRES_POOL_TIMEOUT: 30 + ENABLE_SERIALIZE_LOGS: "FALSE" + +podSecurityContext: + fsGroup: 65534 +securityContext: + runAsUser: 0 + +extraVolumes: + - name: logs + emptyDir: {} +extraVolumeMounts: + - name: logs + mountPath: /logs + +affinity: + nodeAffinity: + 
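    # reviewer note: "preferred" scheduling rules are soft; the scheduler
    # favours spot-lifecycle nodes and spreads replicas across hostnames, but
    # still places pods when neither preference can be satisfied. The
    # commented-out requiredDuringScheduling block below would make the
    # spread mandatory instead.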
preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: node.kubernetes.io/lifecycle + operator: In + values: + - spot + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - '{{ include "didx-cloud.fullname" . }}' + topologyKey: kubernetes.io/hostname + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - '{{ include "didx-cloud.fullname" . }}' + # topologyKey: kubernetes.io/hostname + +configFiles: + log_conf.yaml: + path: /tmp/log_conf.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + default: + "()": uvicorn.logging.DefaultFormatter + format: '%(asctime)s %(name)s %(levelname)s %(message)s' + use_colors: null + access: + "()": uvicorn.logging.AccessFormatter + format: '%(asctime)s %(name)s %(levelname)s %(client_addr)s - "%(request_line)s" %(status_code)s' + handlers: + default: + formatter: default + class: logging.StreamHandler + stream: ext://sys.stderr + access: + formatter: access + class: logging.StreamHandler + stream: ext://sys.stdout + loggers: + uvicorn: + level: INFO + handlers: + - default + propagate: no + uvicorn.error: + level: INFO + uvicorn.access: + level: INFO + handlers: + - access + propagate: no diff --git a/helm/didx-cloud/conf/local/waypoint.yaml b/helm/didx-cloud/conf/local/waypoint.yaml new file mode 100644 index 000000000..7246abd72 --- /dev/null +++ b/helm/didx-cloud/conf/local/waypoint.yaml @@ -0,0 +1,163 @@ +fullnameOverride: waypoint + +replicaCount: 2 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/waypoint.logs: '[{"source": "python.uvicorn", "service": "waypoint"}]' + proxy.istio.io/config: |- + proxyMetadata: + ISTIO_META_IDLE_TIMEOUT: 0s +podLabels: + admission.datadoghq.com/enabled: "true" + +image: + name: d-cloud/waypoint + pullPolicy: Always + tag: master + +command: + - poetry + - run + - uvicorn + - waypoint.main:app + - --log-config=/tmp/log_conf.yaml + - --reload + - --host + - 0.0.0.0 + - --port + - 3010 + +ingressDomain: cloudapi.127.0.0.1.nip.io +ingress: + internal: + enabled: true + className: nginx + rules: + - host: waypoint.{{ .Values.ingressDomain }} + paths: + - path: / + +service: + # if set, will run Pods on Node Network + appProtocol: http + hostNetwork: false + port: 3010 + containerPort: 3010 + +livenessProbe: + httpGet: + path: /health/live + port: "{{ trunc 15 .Release.Name }}" +readinessProbe: + httpGet: + path: /health/ready + port: "{{ trunc 15 .Release.Name }}" + +lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - sleep 15 + +# resources: +# requests: +# cpu: 50m +# memory: 384Mi +# limits: +# cpu: 250m +# memory: 512Mi + +autoscaling: + enabled: false + +env: + LOG_LEVEL: info + OPENAPI_NAME: waypoint + PYTHONPATH: "/" + ENABLE_SERIALIZE_LOGS: "FALSE" + NATS_CREDS_FILE: "" # NATS in Local dev has no auth + NATS_SERVER: nats://nats:4222 + NATS_STATE_SUBJECT: cloudapi.aries.state_monitoring + NATS_STATE_STREAM: cloudapi_aries_state_monitoring + +podSecurityContext: + fsGroup: 65534 +securityContext: + runAsUser: 0 + +extraVolumes: + - name: logs + emptyDir: {} +extraVolumeMounts: + - name: logs + mountPath: /logs + +affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - 
key: node.kubernetes.io/lifecycle + operator: In + values: + - spot + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - '{{ include "didx-cloud.fullname" . }}' + topologyKey: kubernetes.io/hostname + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - '{{ include "didx-cloud.fullname" . }}' + # topologyKey: kubernetes.io/hostname + +configFiles: + log_conf.yaml: + path: /tmp/log_conf.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + default: + "()": uvicorn.logging.DefaultFormatter + format: '%(asctime)s %(name)s %(levelname)s %(message)s' + use_colors: null + access: + "()": uvicorn.logging.AccessFormatter + format: '%(asctime)s %(name)s %(levelname)s %(client_addr)s - "%(request_line)s" %(status_code)s' + handlers: + default: + formatter: default + class: logging.StreamHandler + stream: ext://sys.stderr + access: + formatter: access + class: logging.StreamHandler + stream: ext://sys.stdout + loggers: + uvicorn: + level: INFO + handlers: + - default + propagate: no + uvicorn.error: + level: INFO + uvicorn.access: + level: INFO + handlers: + - access + propagate: no diff --git a/helm/didx-cloud/templates/NOTES.txt b/helm/didx-cloud/templates/NOTES.txt new file mode 100644 index 000000000..cd759310a --- /dev/null +++ b/helm/didx-cloud/templates/NOTES.txt @@ -0,0 +1,22 @@ + +[ didx:cloud ] + +Installed as: {{ .Release.Name }} +Namespace: {{ .Release.Namespace }} + +Available externally as: +{{- range .Values.ingress }} +{{- if .enabled }} +{{- range .hosts }} +http://{{ tpl .host $ }} +{{- end }} +{{- end }} +{{- end }} + +Connect to this instance from within your cluster: + + {{ include "didx-cloud.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.port }} + +View resources: + + $ helm -n {{ .Release.Namespace }} get all {{ .Release.Name }} diff --git a/helm/didx-cloud/templates/_helpers.tpl b/helm/didx-cloud/templates/_helpers.tpl new file mode 100644 index 000000000..2b4746b2f --- /dev/null +++ b/helm/didx-cloud/templates/_helpers.tpl @@ -0,0 +1,102 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "didx-cloud.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "didx-cloud.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "didx-cloud.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "didx-cloud.labels" -}} +helm.sh/chart: {{ include "didx-cloud.chart" . }} +{{ include "didx-cloud.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "didx-cloud.selectorLabels" -}} +app.kubernetes.io/name: {{ include "didx-cloud.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "didx-cloud.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "didx-cloud.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_tplvalues.tpl +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template perhaps with scope if the scope is present. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ ) }} +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ "scope" $app ) }} +*/}} +{{- define "common.tplvalues.render" -}} +{{- $value := typeIs "string" .value | ternary .value (.value | toYaml) }} +{{- if contains "{{" (toJson .value) }} + {{- if .scope }} + {{- tpl (cat "{{- with $.RelativeScope -}}" $value "{{- end }}") (merge (dict "RelativeScope" .scope) .context) }} + {{- else }} + {{- tpl $value .context }} + {{- end }} +{{- else }} + {{- $value }} +{{- end }} +{{- end -}} + +{{/* +Merge a list of values that contains template after rendering them. +Merge precedence is consistent with http://masterminds.github.io/sprig/dicts.html#merge-mustmerge +Usage: +{{ include "common.tplvalues.merge" ( dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $ ) }} +*/}} +{{- define "common.tplvalues.merge" -}} +{{- $dst := dict -}} +{{- range .values -}} +{{- $dst = include "common.tplvalues.render" (dict "value" . "context" $.context "scope" $.scope) | fromYaml | merge $dst -}} +{{- end -}} +{{ $dst | toYaml }} +{{- end -}} diff --git a/helm/didx-cloud/templates/configmap.yaml b/helm/didx-cloud/templates/configmap.yaml new file mode 100644 index 000000000..c38689071 --- /dev/null +++ b/helm/didx-cloud/templates/configmap.yaml @@ -0,0 +1,12 @@ +{{- range $configName, $configContent := .Values.configFiles }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: "{{ include "didx-cloud.fullname" $ }}-{{ $configName | replace "_" "" | lower }}" + labels: + {{- include "didx-cloud.labels" $ | nindent 4 }} +data: + {{ $configName }}: |- + {{- tpl $configContent.content $ | nindent 4 }} +{{- end }} diff --git a/helm/didx-cloud/templates/deployment.yaml b/helm/didx-cloud/templates/deployment.yaml new file mode 100644 index 000000000..aa35d4642 --- /dev/null +++ b/helm/didx-cloud/templates/deployment.yaml @@ -0,0 +1,204 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "didx-cloud.fullname" . }} + labels: + {{- include "didx-cloud.labels" . | nindent 4 }} + {{- if .Values.deploymentLabels }} + {{- tpl (toYaml .Values.deploymentLabels) . | nindent 4 }} + {{- end }} + {{- if .Values.deploymentAnnotations }} + annotations: + {{- tpl (toYaml .Values.deploymentAnnotations) . 
| nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicaCount }} + strategy: + type: {{ default "RollingUpdate" .Values.strategy.type }} + {{- if not (eq .Values.strategy.type "Recreate") }} + rollingUpdate: + maxSurge: {{ default "25%" .Values.strategy.rollingUpdate.maxSurge }} + maxUnavailable: {{ default "25%" .Values.strategy.rollingUpdate.maxUnavailable }} + {{- end }} + selector: + matchLabels: + {{- include "didx-cloud.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "didx-cloud.selectorLabels" . | nindent 8 }} + {{- if .Values.podLabels }} + {{- tpl (toYaml .Values.podLabels) . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.podAnnotations }} + {{- tpl (toYaml .Values.podAnnotations) . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.initContainers }} + initContainers: + {{- tpl (toYaml .Values.initContainers) . | nindent 6 }} + {{- end }} + volumes: + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 6 }} + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + persistentVolumeClaim: + claimName: {{ include "didx-cloud.fullname" . }} + {{- end }} + {{- range $configName, $configContent := .Values.configFiles }} + - name: "{{ $.Release.Name }}-{{ $configName | replace "." "-" | replace "_" "-" | lower }}" + configMap: + name: "{{ include "didx-cloud.fullname" $ }}-{{ $configName | replace "_" "" | lower }}" + defaultMode: 0777 + {{- end }} + automountServiceAccountToken: {{ .Values.serviceAccount.automount }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + # allow old pods to stay up for up to the specified interval + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- if .Values.service.hostNetwork }} + hostNetwork: true + {{- end }} + containers: + - name: {{ include "didx-cloud.fullname" . }} + securityContext: + {{- toYaml .Values.securityContext | nindent 10 }} + image: "{{ .Values.image.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.args }} + args: + {{- range .Values.args }} + - {{ . | quote }} + {{- end }} + {{- end }} + {{- if .Values.command }} + command: + {{- range .Values.command }} + - {{ tpl ( toString . ) $ | quote }} + {{- end }} + {{- end }} + ports: + {{- if .Values.service.name }} + - name: {{ tpl .Values.service.name $ }} + {{- else }} + - name: "{{ trunc 15 .Release.Name }}" + {{- end }} + containerPort: {{ .Values.service.containerPort }} + protocol: TCP + {{- range .Values.addPorts }} + {{- if .name }} + - name: {{ tpl .name $ }} + {{- else }} + - name: {{ .port }}-{{ .protocol | lower | default "tcp" }} + {{- end }} + containerPort: {{ .containerPort }} + protocol: {{ .protocol | default "TCP" }} + {{- end }} + {{- range $index, $range := .Values.addPortsRange }} + {{- range untilStep ( .minPort | int ) ( add1 .maxPort | int ) 1 }} + - name: "{{ . }}-{{ lower $range.protocol | default "tcp" }}" + containerPort: {{ . 
}} + protocol: {{ $range.protocol | default "TCP" }} + {{- end }} + {{- end }} + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- range $k,$v := .Values.env }} + - name: {{ $k }} + {{- $type := printf "%s" (typeOf $v) }} + {{- if or (eq $type "string") (eq $type "float64") (eq $type "bool") }} + value: {{ tpl (toString $v) $ | quote }} + {{- else }} + {{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 10 }} + {{- end }} + {{- end }} + {{- if .Values.livenessProbe }} + livenessProbe: + {{- include "common.tplvalues.render" (dict "value" .Values.livenessProbe "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.readinessProbe }} + readinessProbe: + {{- include "common.tplvalues.render" (dict "value" .Values.readinessProbe "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.startupProbe }} + startupProbe: + {{- include "common.tplvalues.render" (dict "value" .Values.startupProbe "context" $) | nindent 10 }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + envFrom: + {{- if .Values.secretData }} + - secretRef: + name: {{ include "didx-cloud.fullname" . }}-env + {{- end }} + {{- range .Values.extraSecretNamesForEnvFrom }} + - secretRef: + name: {{ . }} + {{- end }} + {{- range $.Values.extraConfigmapNamesForEnvFrom }} + - configMapRef: + name: {{ . }} + {{- end }} + volumeMounts: + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 8 }} + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + {{- if .Values.configFiles }} + {{- range $configName, $configContent := .Values.configFiles }} + - name: "{{ $.Release.Name }}-{{ $configName | replace "." "-" | replace "_" "-" | lower }}" + mountPath: {{ $configContent.path }} + subPath: {{ $configName }} + {{- end }} + {{- end }} + {{- with .Values.lifecycle }} + lifecycle: + {{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 10 }} + {{- end }} + {{- range .Values.extraContainers }} + - name: "{{ .name }}" + securityContext: + {{- toYaml .securityContext | nindent 12 }} + image: "{{ .image.registry | default $.Values.image.registry }}/{{ .image.name | default $.Values.image.name }}:{{ .image.tag | default $.Values.image.tag }}" + imagePullPolicy: {{ .image.pullPolicy | default $.Values.image.pullPolicy }} + ports: + - name: "{{ trunc 15 .name }}" + containerPort: {{ .port }} + protocol: TCP + resources: + {{- toYaml .resources | nindent 12 }} + command: + {{- range .command }} + - {{ tpl ( toString . ) $ | quote }} + {{- end }} + envFrom: + {{- if $.Values.secretData }} + - secretRef: + name: {{ include "didx-cloud.fullname" $ }}-env + {{- end }} + {{- range $.Values.extraSecretNamesForEnvFrom }} + - secretRef: + name: {{ . }} + {{- end }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- include "common.tplvalues.render" (dict "value" . 
"context" $) | nindent 8 }} + {{- end }} diff --git a/helm/didx-cloud/templates/hpa.yaml b/helm/didx-cloud/templates/hpa.yaml new file mode 100644 index 000000000..e2679c0c6 --- /dev/null +++ b/helm/didx-cloud/templates/hpa.yaml @@ -0,0 +1,32 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "didx-cloud.fullname" . }} + labels: + {{- include "didx-cloud.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "didx-cloud.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/helm/didx-cloud/templates/ingress.yaml b/helm/didx-cloud/templates/ingress.yaml new file mode 100644 index 000000000..75a748a4d --- /dev/null +++ b/helm/didx-cloud/templates/ingress.yaml @@ -0,0 +1,68 @@ +{{- range $key, $_ := .Values.ingress }} +{{- if eq (tpl (toString .enabled) $) "true" }} +{{- if $key }} +--- +{{- end }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + {{- if .name }} + name: {{ tpl .name $ }} + {{- else }} + name: {{ printf "%s-%s" (include "didx-cloud.fullname" $) $key }} + {{- end }} + {{- with .namespace }} + namespace: {{ tpl . $ }} + {{- end }} + labels: + {{- include "didx-cloud.labels" $ | nindent 4 }} + {{- with .labels }} + {{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 4 }} + {{- end }} + {{- with .annotations }} + annotations: + {{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 4 }} + {{- end }} +spec: +{{- if .tls }} + tls: + {{- range .tls }} + - hosts: + {{- range .hosts }} + - {{ tpl . 
$ | quote }} + {{- end }} + secretName: {{ tpl .secretName $ }} + {{- end }} +{{- end }} + ingressClassName: {{ .className }} + rules: + {{- range $rules := .rules }} + - host: {{ tpl $rules.host $ | quote }} + http: + paths: + {{- if not $rules.paths }} + - path: / + pathType: Prefix + backend: + service: + name: {{ include "didx-cloud.fullname" $ }} + port: + number: {{ $.Values.service.port }} + {{- else }} + {{- range $paths := $rules.paths }} + {{- if $paths.path }} + - path: {{ tpl $paths.path $ }} + {{- else }} + - path: / + {{- end }} + pathType: {{ default "Prefix" $paths.pathType }} + backend: + service: + name: {{ default (include "didx-cloud.fullname" $) $paths.service }} + port: + number: {{ default $.Values.service.port $paths.port }} + {{- end -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} diff --git a/helm/didx-cloud/templates/istio-authnz.yaml b/helm/didx-cloud/templates/istio-authnz.yaml new file mode 100644 index 000000000..480926660 --- /dev/null +++ b/helm/didx-cloud/templates/istio-authnz.yaml @@ -0,0 +1,60 @@ +{{- with .Values.istio.auth }} +{{- with .authn }} +{{- if .enabled }} +# https://istio.io/latest/docs/tasks/security/authentication/authn-policy/ +# https://istio.io/latest/docs/reference/config/security/request_authentication/ +# https://istio.io/latest/docs/reference/config/security/jwt/ +apiVersion: security.istio.io/v1 +kind: RequestAuthentication +metadata: + name: {{ include "didx-cloud.fullname" $ }} + labels: + {{- include "didx-cloud.labels" $ | nindent 4 }} + {{- if .labels }} + {{- tpl (toYaml .labels) $ | nindent 4 }} + {{- end }} + {{- if .annotations }} + annotations: + {{- tpl (toYaml .annotations) $ | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "didx-cloud.selectorLabels" $ | nindent 6 }} + jwtRules: + {{- tpl (toYaml .conf.jwtRules) $ | nindent 4 }} +{{- end }} +{{- end }} +--- +{{- with .authz }} +{{- if .enabled }} +# https://istio.io/latest/docs/reference/config/security/authorization-policy/ +apiVersion: security.istio.io/v1 +kind: AuthorizationPolicy +metadata: + name: {{ include "didx-cloud.fullname" $ }} + labels: + {{- include "didx-cloud.labels" $ | nindent 4 }} + {{- if .labels }} + {{- tpl (toYaml .labels) $ | nindent 4 }} + {{- end }} + {{- if .annotations }} + annotations: + {{- tpl (toYaml .annotations) $ | nindent 4 }} + {{- end }} +spec: + action: {{ default "ALLOW" .conf.action }} + selector: + matchLabels: + {{- include "didx-cloud.selectorLabels" $ | nindent 6 }} + rules: + {{- if .conf.rules }} + {{- tpl (toYaml .conf.rules) $ | nindent 4 }} + {{- else }} + - from: + - source: + requestPrincipals: ["*"] + {{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/helm/didx-cloud/templates/istio-envoy.yaml b/helm/didx-cloud/templates/istio-envoy.yaml new file mode 100644 index 000000000..e79372b1b --- /dev/null +++ b/helm/didx-cloud/templates/istio-envoy.yaml @@ -0,0 +1,24 @@ +{{- with .Values.istio.envoyFilter }} +{{- if .enabled }} +# https://istio.io/latest/docs/reference/config/networking/envoy-filter/ +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: {{ include "didx-cloud.fullname" $ }} + labels: + {{- include "didx-cloud.labels" $ | nindent 4 }} + {{- if .labels }} + {{- tpl (toYaml .labels) $ | nindent 4 }} + {{- end }} + {{- if .annotations }} + annotations: + {{- tpl (toYaml .annotations) $ | nindent 4 }} + {{- end }} +spec: + workloadSelector: + labels: + {{- include "didx-cloud.selectorLabels" $ | nindent 6 }} + configPatches: + {{- tpl 
(toYaml .patches) $ | nindent 4 }} +{{- end }} +{{- end }} diff --git a/helm/didx-cloud/templates/istio-peerauth.yaml b/helm/didx-cloud/templates/istio-peerauth.yaml new file mode 100644 index 000000000..b4f82fdc1 --- /dev/null +++ b/helm/didx-cloud/templates/istio-peerauth.yaml @@ -0,0 +1,24 @@ +{{- with .Values.istio.peerAuth }} +{{- if .enabled }} +# https://istio.io/latest/docs/reference/config/security/peer_authentication/ +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: {{ include "didx-cloud.fullname" $ }} + labels: + {{- include "didx-cloud.labels" $ | nindent 4 }} + {{- if .labels }} + {{- tpl (toYaml .labels) $ | nindent 4 }} + {{- end }} + {{- if .annotations }} + annotations: + {{- tpl (toYaml .annotations) $ | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "didx-cloud.selectorLabels" $ | nindent 6 }} + mtls: + mode: {{ default "PERMISSIVE" .conf.mtls.mode }} +{{- end }} +{{- end }} diff --git a/helm/didx-cloud/templates/istio-sidecar.yaml b/helm/didx-cloud/templates/istio-sidecar.yaml new file mode 100644 index 000000000..dc87e824c --- /dev/null +++ b/helm/didx-cloud/templates/istio-sidecar.yaml @@ -0,0 +1,36 @@ +{{- with .Values.istio.sidecar }} +{{- if .enabled }} +# https://istio.io/latest/docs/reference/config/networking/sidecar/ +apiVersion: networking.istio.io/v1beta1 +kind: Sidecar +metadata: + name: {{ include "didx-cloud.fullname" $ }} + labels: + {{- include "didx-cloud.labels" $ | nindent 4 }} + {{- if .labels }} + {{- tpl (toYaml .labels) $ | nindent 4 }} + {{- end }} + {{- if .annotations }} + annotations: + {{- tpl (toYaml .annotations) $ | nindent 4 }} + {{- end }} +spec: + workloadSelector: + labels: + {{- include "didx-cloud.selectorLabels" $ | nindent 6 }} + {{- if .conf.ingress }} + ingress: + {{- tpl (toYaml .conf.ingress) $ | nindent 4 }} + {{- end }} + egress: + {{- if .conf.egress }} + {{- tpl (toYaml .conf.egress) $ | nindent 4 }} + {{- else }} + - hosts: + - "./*" + - "istio-system/*" + {{- end }} + outboundTrafficPolicy: + mode: {{ default "ALLOW_ANY" .conf.outboundTrafficPolicy.mode }} +{{- end }} +{{- end }} diff --git a/helm/didx-cloud/templates/istio-virtualservice.yaml b/helm/didx-cloud/templates/istio-virtualservice.yaml new file mode 100644 index 000000000..f4e34fa1e --- /dev/null +++ b/helm/didx-cloud/templates/istio-virtualservice.yaml @@ -0,0 +1,52 @@ +{{- with .Values.istio.virtualService }} +{{- if .enabled }} +# https://istio.io/latest/docs/reference/config/networking/virtual-service/ +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: {{ include "didx-cloud.fullname" $ }} + labels: + {{- include "didx-cloud.labels" $ | nindent 4 }} + {{- if .labels }} + {{- tpl (toYaml .labels) $ | nindent 4 }} + {{- end }} + {{- if .annotations }} + annotations: + {{- tpl (toYaml .annotations) $ | nindent 4 }} + {{- end }} +spec: + {{- if and .conf.gateways .conf.hosts }} + gateways: + {{- range $gw := .conf.gateways }} + - {{ tpl $gw $ }} + {{- end }} + {{- else if .conf.hosts }} + gateways: + - mesh + - istio-system/istio-internal + {{- else }} + gateways: [] + {{- end }} + {{- if .conf.hosts }} + hosts: + {{- range $h := .conf.hosts }} + - {{ tpl $h $ }} + {{- end }} + {{- end }} + http: + {{- if .conf.http }} + {{- tpl (toYaml .conf.http) $ | nindent 4 }} + {{- else }} + - name: {{ include "didx-cloud.fullname" $ }} + route: + - destination: + host: {{ include "didx-cloud.fullname" $ }} + port: + number: {{ $.Values.service.port }} + {{- end }} + {{- if 
.conf.tcp }} + tcp: + {{- tpl (toYaml .conf.tcp) $ | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/helm/didx-cloud/templates/pdb.yaml b/helm/didx-cloud/templates/pdb.yaml new file mode 100644 index 000000000..dc457f38f --- /dev/null +++ b/helm/didx-cloud/templates/pdb.yaml @@ -0,0 +1,11 @@ +{{- if and (gt (int .Values.replicaCount) 1) .Values.pdb.enabled }} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ include "didx-cloud.fullname" . }} +spec: + maxUnavailable: {{ default 1 .Values.pdb.maxUnavailable }} + selector: + matchLabels: + {{- include "didx-cloud.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/helm/didx-cloud/templates/pvc.yaml b/helm/didx-cloud/templates/pvc.yaml new file mode 100644 index 000000000..7f00b60fe --- /dev/null +++ b/helm/didx-cloud/templates/pvc.yaml @@ -0,0 +1,15 @@ +{{- if .Values.persistence.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "didx-cloud.fullname" . }} + labels: + {{- include "didx-cloud.labels" . | nindent 4 }} +spec: + storageClassName: {{ .Values.persistence.storageClassName }} + accessModes: + - {{ default "ReadWriteOnce" .Values.persistence.accessMode }} + resources: + requests: + storage: {{ .Values.persistence.capacity }} +{{- end }} diff --git a/helm/didx-cloud/templates/secret.yaml b/helm/didx-cloud/templates/secret.yaml new file mode 100644 index 000000000..340779cd2 --- /dev/null +++ b/helm/didx-cloud/templates/secret.yaml @@ -0,0 +1,13 @@ +{{- if .Values.secretData }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "didx-cloud.fullname" . }}-env + labels: + {{- include "didx-cloud.labels" . | nindent 4 }} +type: Opaque +stringData: + {{- range $key, $value := .Values.secretData }} + {{ upper (snakecase $key) }}: {{ tpl (toString $value) $ | quote }} + {{- end }} +{{- end }} diff --git a/helm/didx-cloud/templates/service.yaml b/helm/didx-cloud/templates/service.yaml new file mode 100644 index 000000000..b1aa17caf --- /dev/null +++ b/helm/didx-cloud/templates/service.yaml @@ -0,0 +1,50 @@ +{{- if not .Values.service.hostNetwork }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "didx-cloud.fullname" . }} + labels: + {{- include "didx-cloud.labels" . | nindent 4 }} + annotations: + {{- toYaml .Values.service.annotations | nindent 4 }} +spec: + type: {{ .Values.service.type | default "ClusterIP" }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.containerPort }} + protocol: TCP + name: {{ trunc 15 .Release.Name }} + {{- with .Values.service.appProtocol }} + appProtocol: {{ . }} + {{- end }} + {{- range .Values.addPorts }} + - port: {{ .port }} + targetPort: {{ .containerPort }} + protocol: {{ .protocol | default "TCP" }} + {{- if .name }} + name: {{ tpl .name $ }} + {{- else }} + name: {{ .port }}-{{ .protocol | lower | default "tcp" }} + {{- end }} + {{- with .appProtocol }} + appProtocol: {{ . }} + {{- end }} + {{- end }} + {{- range $index, $port := .Values.addPortsRange }} + {{- range untilStep ( .minPort | int ) ( add1 .maxPort | int ) 1 }} + - port: {{ . }} + targetPort: {{ . }} + protocol: {{ $port.protocol | default "TCP" }} + {{- if $port.name }} + name: {{ tpl $port.name $ }} + {{- else }} + name: {{ . }}-{{ $port.protocol | lower | default "tcp" }} + {{- end }} + {{- with .appProtocol }} + appProtocol: {{ . }} + {{- end }} + {{- end }} + {{- end }} + selector: + {{- include "didx-cloud.selectorLabels" . 
| nindent 4 }} +{{- end }} diff --git a/helm/didx-cloud/templates/serviceaccount.yaml b/helm/didx-cloud/templates/serviceaccount.yaml new file mode 100644 index 000000000..969edb827 --- /dev/null +++ b/helm/didx-cloud/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "didx-cloud.serviceAccountName" . }} + labels: + {{- include "didx-cloud.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automount }} +{{- end }} diff --git a/helm/didx-cloud/values.yaml b/helm/didx-cloud/values.yaml new file mode 100644 index 000000000..276814798 --- /dev/null +++ b/helm/didx-cloud/values.yaml @@ -0,0 +1,289 @@ +# Default values for the chart +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +replicaCount: 1 + +fullnameOverride: "" + +strategy: + type: RollingUpdate + rollingUpdate: {} + # maxSurge: 25% + # maxUnavailable: 25% +pdb: + enabled: true + # maxUnavailable: 50% + +image: + name: alpine + registry: ghcr.io/didx-xyz + pullPolicy: Always + # Overrides the image tag whose default is the chart appVersion. + tag: latest + +# This section builds out the service account. More information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/ +serviceAccount: + # Specifies whether a service account should be created + create: true + # Automatically mount a ServiceAccount's API credentials? + automount: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +ingress: + internal: + enabled: false + className: nginx-internal + rules: + - host: example.local + paths: + - path: / + port: 80 + external: + enabled: false + className: nginx-external + rules: + - host: example.local + paths: + - path: / + port: 80 + +deploymentLabels: + # tags.datadoghq.com/env: + tags.datadoghq.com/service: '{{ include "didx-cloud.fullname" . }}' + tags.datadoghq.com/version: '{{ .Values.image.tag }}' +deploymentAnnotations: {} +podLabels: + tags.datadoghq.com/service: '{{ include "didx-cloud.fullname" . }}' + tags.datadoghq.com/version: '{{ .Values.image.tag }}' + admission.datadoghq.com/enabled: "false" # disabled by default (for now) +podAnnotations: + # gcr.io/datadoghq/dd-lib-python-init + admission.datadoghq.com/python-lib.version: v2.17.2 + ad.datadoghq.com/istio-proxy.logs: '[{ "source": "envoy", "service": "{{ include "didx-cloud.fullname" . }}" }]' + ad.datadoghq.com/istio-init.logs: '[{ "source": "envoy", "service": "{{ include "didx-cloud.fullname" . }}" }]' + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +# The list of arguments to pass to the Pod's entrypoint +args: [] +# args: ["--verbose"] + +service: + # if set, will run Pods on the host network + hostNetwork: false + port: 80 + containerPort: 80 + +# A list of additional Ports (TCP by default) to expose over the Service +addPorts: [] + # - port: 8080 + # containerPort: 8080 + # - port: 8070 + # containerPort: 8070 + # protocol: UDP + +# A list of additional port ranges (TCP by default) to expose over the Service. 
+# TODO: Be careful with overlapping port numbers under different protocols; any change to such a range might cause a Helm sorting failure on upgrade +addPortsRange: [] + # - minPort: 49152 + # maxPort: 49452 + # protocol: TCP + # - minPort: 49152 + # maxPort: 49452 + # protocol: UDP + +# https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes +livenessProbe: {} + # httpGet: + # path: /healthz + # port: "{{ trunc 15 .Release.Name }}" + # failureThreshold: 1 + # periodSeconds: 10 +readinessProbe: {} + # httpGet: + # path: /healthz + # port: "{{ trunc 15 .Release.Name }}" + # failureThreshold: 1 + # periodSeconds: 10 +startupProbe: {} + # httpGet: + # path: /healthz + # port: "{{ trunc 15 .Release.Name }}" + # failureThreshold: 30 + # periodSeconds: 10 + +# https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/ +lifecycle: {} + # postStart: + # exec: + # command: + # - /bin/sh + # - -c + # - echo Hello from the postStart handler > /usr/share/message + # preStop: + # exec: + # command: + # - /bin/sh + # - -c + # - nginx -s quit; while killall -0 nginx; do sleep 1; done + +resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +persistence: + enabled: false + # mountPath: /opt + # capacity: 25Gi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 75 + targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +# allow the app to drain connections within two minutes +# terminationGracePeriodSeconds: 120 + +# Specify as many extra containers to run as necessary +extraContainers: [] +# - name: container +# image: {} # same as the main container unless explicitly defined + +# If you have sensitive data to pass as environment variables to the app instance, +# you can specify the secrets containing them here +extraSecretNamesForEnvFrom: [] +env: + DD_LOGS_INJECTION: true + DD_PROFILING_ENABLED: false + DD_RUNTIME_METRICS_ENABLED: true + # DD_TRACE_SAMPLE_RATE: "1" + +# ConfigMaps to create. They will be mounted into Pods at the specified path; the path must be unique for each mount +configFiles: {} + # myconf.json: + # path: /etc/foo + # content: |- + # { + # "foo": "bar" + # } + # anotherconf.json: + # path: /etc/bar + # content: |- + # { + # "bar": "foo" + # } + +# Creates a secret named {{ include "didx-cloud.fullname" . 
}}-env +# and automatically passes as environment variables +secretData: {} + +### Istio +istio: + peerAuth: + enabled: false + labels: {} + annotations: {} + conf: + mtls: + mode: PERMISSIVE + + sidecar: + enabled: false + labels: {} + annotations: {} + conf: + egress: + - hosts: + - "./*" + - "istio-system/*" + ingress: [] + outboundTrafficPolicy: + mode: ALLOW_ANY + + virtualService: + enabled: false + labels: {} + annotations: {} + conf: + gateways: [] + # - mesh + # - istio-system/istio-internal + # - istio-system/istio-external + hosts: + - example.local + http: + - name: '{{ include "didx-cloud.fullname" $ }}' + route: + - destination: + host: '{{ include "didx-cloud.fullname" $ }}' + port: + number: 80 + + auth: + authn: + enabled: false + labels: {} + annotations: {} + conf: + jwtRules: + ## https://istio.io/latest/docs/reference/config/security/jwt/ + - issuer: https://keycloak.org/realms/example + ## Where to fetch the JWKS from (optional) + # jwksUri: https://keycloak.org/realms/example/protocol/openid-connect/certs + ## Pass the JWT Payload (Base64 Encoded) to the backend via this header + # outputPayloadToHeader: x-jwt-payload + ## Copy parts of the JWT Payload into HTTP headers + # outputClaimToHeaders: + # - header: x-jwt-iss + # claim: iss + authz: + enabled: false + labels: {} + annotations: {} + conf: + action: ALLOW + rules: + ## https://istio.io/latest/docs/reference/config/security/authorization-policy/#Rule + - when: + - key: request.auth.claims[iss] + values: + - https://keycloak.org/realms/example + # - from: + # - source: + # remoteIpBlocks: + # - 1.2.3.4/32 + # - from: + # - source: + # namespaces: + # - '{{ .Release.Namespace }}' + + envoyFilter: + enabled: false + labels: {} + annotations: {} + patches: []
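 +# For example (an illustrative sketch only, not a tested default of this chart): a patch +# along these lines merges an idle_timeout into the sidecar's HTTP connection manager. +# patches: +#   - applyTo: NETWORK_FILTER +#     match: +#       context: SIDECAR_INBOUND +#       listener: +#         filterChain: +#           filter: +#             name: envoy.filters.network.http_connection_manager +#     patch: +#       operation: MERGE +#       value: +#         typed_config: +#           "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager +#           common_http_protocol_options: +#             idle_timeout: 30s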