From 18581a67f75d6a2914e32c806479b8935cabd162 Mon Sep 17 00:00:00 2001 From: Jake Briggs Date: Fri, 22 Nov 2024 12:51:04 -0600 Subject: [PATCH] Postgres to helm (#589) * WIP * Conversion and bugs Postgres-operator is now deployed with helm. Also Fixed a bug with memcached and set permissions for 2 bin files and corrected a bug in bootstrap.sh * fix whitespace * Update bootstrap.sh * Update memcached-helm-overrides.yaml --- .gitmodules | 3 + .../memcached/memcached-helm-overrides.yaml | 172 ++++-- .../postgres-operator-helm-overrides.yaml | 526 ++++++++++++++++++ .../postgresql/postgresql-helm-overrides.yaml | 487 ---------------- bin/install-libvirt.sh | 0 bin/install-memcached.sh | 0 bin/install-postgres-operator.sh | 29 + bootstrap.sh | 2 +- docs/infrastructure-postgresql.md | 12 +- submodules/postgres-operator | 1 + 10 files changed, 705 insertions(+), 527 deletions(-) create mode 100644 base-helm-configs/postgres-operator/postgres-operator-helm-overrides.yaml delete mode 100644 base-helm-configs/postgresql/postgresql-helm-overrides.yaml mode change 100644 => 100755 bin/install-libvirt.sh mode change 100644 => 100755 bin/install-memcached.sh create mode 100755 bin/install-postgres-operator.sh create mode 160000 submodules/postgres-operator diff --git a/.gitmodules b/.gitmodules index 64a92954..ec787b58 100644 --- a/.gitmodules +++ b/.gitmodules @@ -16,3 +16,6 @@ [submodule "submodules/nginx-gateway-fabric"] path = submodules/nginx-gateway-fabric url = https://github.com/nginxinc/nginx-gateway-fabric.git +[submodule "submodules/postgres-operator"] + path = submodules/postgres-operator + url = https://github.com/zalando/postgres-operator.git diff --git a/base-helm-configs/memcached/memcached-helm-overrides.yaml b/base-helm-configs/memcached/memcached-helm-overrides.yaml index fdcd95b8..40175b43 100644 --- a/base-helm-configs/memcached/memcached-helm-overrides.yaml +++ b/base-helm-configs/memcached/memcached-helm-overrides.yaml @@ -8,7 +8,7 @@ ## @param 
global.storageClass Global StorageClass for Persistent Volume(s) ## global: - imageRegistry: marketplace.azurecr.io + imageRegistry: "" ## E.g. ## imagePullSecrets: ## - myRegistryKeySecretName @@ -39,7 +39,6 @@ commonLabels: {} ## @param commonAnnotations Add annotations to all the deployed resources ## commonAnnotations: {} - ## Enable diagnostic mode in the deployment/statefulset ## diagnosticMode: @@ -54,7 +53,6 @@ diagnosticMode: ## args: - infinity - ## @section Memcached parameters ## Bitnami Memcached image version @@ -70,7 +68,7 @@ diagnosticMode: image: registry: docker.io repository: bitnami/memcached - tag: 1.6.17-debian-11-r15 + tag: 1.6.32-debian-12-r0 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -132,7 +130,6 @@ extraEnvVarsCM: "" ## @param extraEnvVarsSecret Name of existing Secret containing extra env vars for Memcached nodes ## extraEnvVarsSecret: "" - ## @section Deployment/Statefulset parameters ## @param replicaCount Number of Memcached nodes @@ -212,21 +209,46 @@ resources: ## Configure Pods Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod ## @param podSecurityContext.enabled Enabled Memcached pods' Security Context +## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy +## @param podSecurityContext.sysctls Set kernel settings using the sysctl interface +## @param podSecurityContext.supplementalGroups Set filesystem extra groups ## @param podSecurityContext.fsGroup Set Memcached pod's Security Context fsGroup ## podSecurityContext: enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] fsGroup: 1001 ## Configure Container Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container -## @param containerSecurityContext.enabled Enabled Memcached 
containers' Security Context -## @param containerSecurityContext.runAsUser Set Memcached containers' Security Context runAsUser -## @param containerSecurityContext.runAsNonRoot Set Memcached containers' Security Context runAsNonRoot +## @param containerSecurityContext.enabled Enabled containers' Security Context +## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container +## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser +## @param containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup +## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot +## @param containerSecurityContext.privileged Set container's Security Context privileged +## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem +## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation +## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped +## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## containerSecurityContext: enabled: true + seLinuxOptions: {} runAsUser: 1001 + runAsGroup: 1001 runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" +## @param automountServiceAccountToken Mount Service Account token in pod +## +automountServiceAccountToken: false ## @param hostAliases Add deployment host aliases ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ ## @@ -280,7 +302,7 @@ affinity: values: - worker ## @param nodeSelector Node labels for pod assignment -## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## Ref: 
https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ ## nodeSelector: {} ## @param tolerations Tolerations for pod assignment @@ -314,6 +336,10 @@ terminationGracePeriodSeconds: "" updateStrategy: type: RollingUpdate rollingUpdate: {} +## @param emptyDir.medium Override emptyDir Volume type, defaults to emptyDir: {} +## Possible values: "Memory", "" +emptyDir: + medium: "" ## @param extraVolumes Optionally specify extra list of additional volumes for the Memcached pod(s) ## Example Use Case: mount certificates to enable TLS ## e.g: @@ -362,6 +388,11 @@ sidecars: [] ## containerPort: 1234 ## initContainers: [] +## @param enableServiceLinks Whether information about services should be injected into pod's environment variable +## The environment variables injected by service links are not used, but can lead to slow boot times or slow running of the scripts when there are many services in the current namespace. +## If you experience slow pod startups or slow running of the scripts you probably want to set this to `false`. 
+## +enableServiceLinks: true ## Memcached Autoscaling ## @param autoscaling.enabled Enable memcached statefulset autoscaling (requires architecture: "high-availability") ## @param autoscaling.minReplicas memcached statefulset autoscaling minimum number of replicas @@ -387,7 +418,6 @@ pdb: maxUnavailable: 1 ## @section Traffic Exposure parameters - service: ## @param service.type Kubernetes Service type ## @@ -404,9 +434,9 @@ service: memcached: "" ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin ## Values: ClientIP or None - ## ref: https://kubernetes.io/docs/user-guide/services/ + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ ## - sessionAffinity: None + sessionAffinity: "" ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity ## sessionAffinityConfig: ## clientIP: @@ -419,7 +449,7 @@ service: ## clusterIP: "" ## @param service.loadBalancerIP Memcached service Load Balancer IP - ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer ## loadBalancerIP: "" ## @param service.loadBalancerSourceRanges Memcached service Load Balancer sources @@ -439,7 +469,69 @@ service: ## @param service.extraPorts Extra ports to expose in the Memcached service (normally used with the `sidecar` value) ## extraPorts: [] - +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: true + ## @param networkPolicy.allowExternal The Policy model to apply + ## When set to false, only pods with the correct client label will have network access to the ports Memcached is + ## listening on. When true, Memcached will accept connections from any source (with the correct destination port). 
+ ## + allowExternal: true + ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. + ## + allowExternalEgress: true + ## @param networkPolicy.addExternalClientAccess Allow access from pods with client label set to "true". Ignored if `networkPolicy.allowExternal` is true. + ## + addExternalClientAccess: true + ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param networkPolicy.ingressPodMatchLabels [object] Labels to match to allow traffic from other pods. Ignored if `networkPolicy.allowExternal` is true. + ## e.g: + ## ingressPodMatchLabels: + ## my-client: "true" + # + ingressPodMatchLabels: {} + ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces. Ignored if `networkPolicy.allowExternal` is true. + ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces. Ignored if `networkPolicy.allowExternal` is true. + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} ## @section Other Parameters ## Service account for Memcached to use. 
@@ -460,11 +552,10 @@ serviceAccount: ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount ## annotations: {} - ## @section Persistence parameters ## Enable persistence using Persistent Volume Claims -## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ +## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ ## persistence: ## @param persistence.enabled Enable Memcached data persistence using PVC. If false, use emptyDir @@ -488,6 +579,9 @@ persistence: ## @param persistence.annotations Annotations for the PVC ## annotations: {} + ## @param persistence.labels Labels for the PVC + ## + labels: {} ## @param persistence.selector Selector to match an existing Persistent Volume for Memcached's data PVC ## If set, the PVC can't have a PV dynamically provisioned for it ## E.g. @@ -496,7 +590,6 @@ persistence: ## app: my-app ## selector: {} - ## @section Volume Permissions parameters ## @@ -507,17 +600,17 @@ volumePermissions: ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume ## enabled: false - ## @param volumePermissions.image.registry Init container volume-permissions image registry - ## @param volumePermissions.image.repository Init container volume-permissions image repository - ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry + ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository + ## @skip volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets ## image: registry: docker.io - repository: bitnami/bitnami-shell - tag: 11-debian-11-r40 + repository: bitnami/os-shell + tag: 12-debian-12-r31 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. @@ -539,11 +632,12 @@ volumePermissions: ## Init container' Security Context ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container ## containerSecurityContext: + seLinuxOptions: {} runAsUser: 0 - ## Prometheus Exporter / Metrics ## metrics: @@ -552,9 +646,9 @@ metrics: enabled: false ## Bitnami Memcached Prometheus Exporter image ## ref: https://hub.docker.com/r/bitnami/memcached-exporter/tags/ - ## @param metrics.image.registry Memcached exporter image registry - ## @param metrics.image.repository Memcached exporter image repository - ## @param metrics.image.tag Memcached exporter image tag (immutable tags are recommended) + ## @param metrics.image.registry [default: REGISTRY_NAME] Memcached exporter image registry + ## @param metrics.image.repository [default: REPOSITORY_NAME/memcached-exporter] Memcached exporter image repository + ## @skip metrics.image.tag Memcached exporter image tag (immutable tags are recommended) ## @param metrics.image.digest Memcached exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag ## @param metrics.image.pullPolicy Image pull policy ## @param metrics.image.pullSecrets Specify docker-registry secret names as an array @@ -562,7 +656,7 @@ metrics: image: registry: docker.io repository: bitnami/memcached-exporter - tag: 0.10.0-debian-11-r42 + tag: 0.14.4-debian-12-r9 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. @@ -587,14 +681,30 @@ metrics: requests: {} ## Configure Metrics Container Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param metrics.containerSecurityContext.enabled Enabled Metrics containers' Security Context - ## @param metrics.containerSecurityContext.runAsUser Set Metrics containers' Security Context runAsUser - ## @param metrics.containerSecurityContext.runAsNonRoot Set Metrics containers' Security Context runAsNonRoot + ## @param metrics.containerSecurityContext.enabled Enabled containers' Security Context + ## @param metrics.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param metrics.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param metrics.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param metrics.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param metrics.containerSecurityContext.privileged Set container's Security Context privileged + ## @param metrics.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param metrics.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param metrics.containerSecurityContext.seccompProfile.type Set container's 
Security Context seccomp profile ## containerSecurityContext: enabled: true + seLinuxOptions: {} runAsUser: 1001 + runAsGroup: 1001 runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" ## Configure extra options for Memcached Prometheus exporter containers' liveness, readiness and startup probes ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes ## @param metrics.livenessProbe.enabled Enable livenessProbe on Memcached Prometheus exporter containers @@ -667,7 +777,7 @@ metrics: clusterIP: "" ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin ## Values: ClientIP or None - ## ref: https://kubernetes.io/docs/user-guide/services/ + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ ## sessionAffinity: None ## @param metrics.service.annotations [object] Annotations for the Prometheus metrics service diff --git a/base-helm-configs/postgres-operator/postgres-operator-helm-overrides.yaml b/base-helm-configs/postgres-operator/postgres-operator-helm-overrides.yaml new file mode 100644 index 00000000..43ee7cbd --- /dev/null +++ b/base-helm-configs/postgres-operator/postgres-operator-helm-overrides.yaml @@ -0,0 +1,526 @@ +image: + registry: ghcr.io + repository: zalando/postgres-operator + tag: v1.12.2 + pullPolicy: "IfNotPresent" + +# Optionally specify an array of imagePullSecrets. +# Secrets must be manually created in the namespace. 
+# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod +# imagePullSecrets: +# - name: myRegistryKeySecretName + +podAnnotations: {} +podLabels: {} + +configTarget: "OperatorConfigurationCRD" + +# JSON logging format +enableJsonLogging: false + +# general configuration parameters +configGeneral: + # the deployment should create/update the CRDs + enable_crd_registration: true + # specify categories under which crds should be listed + crd_categories: + - "all" + # update only the statefulsets without immediately doing the rolling update + enable_lazy_spilo_upgrade: false + # set the PGVERSION env var instead of providing the version via postgresql.bin_dir in SPILO_CONFIGURATION + enable_pgversion_env_var: true + # start any new database pod without limitations on shm memory + enable_shm_volume: true + # enables backwards compatible path between Spilo 12 and Spilo 13+ images + enable_spilo_wal_path_compat: false + # operator will sync only clusters where name starts with teamId prefix + enable_team_id_clustername_prefix: false + # etcd connection string for Patroni. Empty uses K8s-native DCS. + etcd_host: "" + # Spilo docker image + docker_image: ghcr.io/zalando/spilo-16:3.2-p3 + + # key name for annotation to ignore globally configured instance limits + # ignore_instance_limits_annotation_key: "" + + # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s) + # kubernetes_use_configmaps: false + + # min number of instances in Postgres cluster. -1 = no limit + min_instances: -1 + # max number of instances in Postgres cluster. 
-1 = no limit + max_instances: -1 + # period between consecutive repair requests + repair_period: 5m + # period between consecutive sync requests + resync_period: 30m + # can prevent certain cases of memory overcommitment + # set_memory_request_to_limit: false + + # map of sidecar names to docker images + # sidecar_docker_images: + # example: "exampleimage:exampletag" + + # number of routines the operator spawns to process requests concurrently + workers: 8 + +# parameters describing Postgres users +configUsers: + # roles to be granted to database owners + # additional_owner_roles: + # - cron_admin + + # enable password rotation for app users that are not database owners + enable_password_rotation: false + # rotation interval for updating credentials in K8s secrets of app users + password_rotation_interval: 90 + # retention interval to keep rotation users + password_rotation_user_retention: 180 + # postgres username used for replication between instances + replication_username: standby + # postgres superuser name to be created by initdb + super_username: postgres + +configMajorVersionUpgrade: + # "off": no upgrade, "manual": manifest triggers action, "full": minimal version violation triggers too + major_version_upgrade_mode: "off" + # upgrades will only be carried out for clusters of listed teams when mode is "off" + # major_version_upgrade_team_allow_list: + # - acid + + # minimal Postgres major version that will not automatically be upgraded + minimal_major_version: "12" + # target Postgres major version when upgrading clusters automatically + target_major_version: "16" + +configKubernetes: + # list of additional capabilities for postgres container + # additional_pod_capabilities: + # - "SYS_NICE" + + # default DNS domain of K8s cluster where operator is running + cluster_domain: cluster.local + # additional labels assigned to the cluster objects + cluster_labels: + application: spilo + # label assigned to Kubernetes objects created by the operator + 
cluster_name_label: cluster-name + # additional annotations to add to every database pod + # custom_pod_annotations: + # keya: valuea + # keyb: valueb + + # key name for annotation that compares manifest value with current date + # delete_annotation_date_key: "delete-date" + + # key name for annotation that compares manifest value with cluster name + # delete_annotation_name_key: "delete-clustername" + + # list of annotations propagated from cluster manifest to statefulset and deployment + # downscaler_annotations: + # - deployment-time + # - downscaler/* + + # allow user secrets in other namespaces than the Postgres cluster + enable_cross_namespace_secret: false + # use finalizers to ensure all managed resources are deleted prior to the postgresql CR + # this avoids stale resources in case the operator misses a delete event or is not running + # during deletion + enable_finalizers: false + # enables initContainers to run actions before Spilo is started + enable_init_containers: true + # toggles if operator should delete secrets on cluster deletion + enable_secrets_deletion: true + # toggles if operator should delete PVCs on cluster deletion + enable_persistent_volume_claim_deletion: true + # toggles pod anti affinity on the Postgres pods + enable_pod_antiaffinity: false + # toggles PDB to set to MinAvailable 0 or 1 + enable_pod_disruption_budget: true + # toggles readiness probe for database pods + enable_readiness_probe: false + # enables sidecar containers to run alongside Spilo in the same pod + enable_sidecars: true + + # annotations to be ignored when comparing statefulsets, services etc. 
+ # ignored_annotations: + # - k8s.v1.cni.cncf.io/network-status + + # namespaced name of the secret containing infrastructure roles names and passwords + # infrastructure_roles_secret_name: postgresql-infrastructure-roles + + # list of annotation keys that can be inherited from the cluster manifest + # inherited_annotations: + # - owned-by + + # list of label keys that can be inherited from the cluster manifest + # inherited_labels: + # - application + # - environment + + # timeout for successful migration of master pods from unschedulable node + # master_pod_move_timeout: 20m + + # set of labels that a running and active node should possess to be considered ready + # node_readiness_label: + # status: ready + + # defines how nodeAffinity from manifest should be merged with node_readiness_label + # node_readiness_label_merge: "OR" + + # namespaced name of the secret containing the OAuth2 token to pass to the teams API + # oauth_token_secret_name: postgresql-operator + + # toggle if `spilo-role=master` selector should be added to the PDB (Pod Disruption Budget) + pdb_master_label_selector: true + # defines the template for PDB names + pdb_name_format: "postgres-{cluster}-pdb" + # specify the PVC retention policy when scaling down and/or deleting + persistent_volume_claim_retention_policy: + when_deleted: "retain" + when_scaled: "retain" + # switches pod anti affinity type to `preferredDuringSchedulingIgnoredDuringExecution` + pod_antiaffinity_preferred_during_scheduling: false + # override topology key for pod anti affinity + pod_antiaffinity_topology_key: "kubernetes.io/hostname" + # namespaced name of the ConfigMap with environment variables to populate on every pod + # pod_environment_configmap: "default/my-custom-config" + # name of the Secret (in cluster namespace) with environment variables to populate on every pod + # pod_environment_secret: "my-custom-secret" + + # specify the pod management policy of stateful sets of Postgres clusters + 
pod_management_policy: "ordered_ready" + # label assigned to the Postgres pods (and services/endpoints) + pod_role_label: spilo-role + # service account definition as JSON/YAML string to be used by postgres cluster pods + # pod_service_account_definition: "" + + # role binding definition as JSON/YAML string to be used by pod service account + # pod_service_account_role_binding_definition: "" + + # Postgres pods are terminated forcefully after this timeout + pod_terminate_grace_period: 5m + # template for database user secrets generated by the operator, + # here username contains the namespace in the format namespace.username + # if the user is in different namespace than cluster and cross namespace secrets + # are enabled via `enable_cross_namespace_secret` flag in the configuration. + secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" + # sharing unix socket of PostgreSQL (`pg_socket`) with the sidecars + share_pgsocket_with_sidecars: false + # set user and group for the spilo container (required to run Spilo as non-root process) + # spilo_runasuser: 101 + # spilo_runasgroup: 103 + + # group ID with write-access to volumes (required to run Spilo as non-root process) + # spilo_fsgroup: 103 + + # whether the Spilo container should run in privileged mode + spilo_privileged: false + # whether the Spilo container should run with additional permissions other than parent. 
+ # required by cron which needs setuid + spilo_allow_privilege_escalation: true + # storage resize strategy, available options are: ebs, pvc, off or mixed + storage_resize_mode: pvc + # pod toleration assigned to instances of every Postgres cluster + # toleration: + # key: db-only + # operator: Exists + # effect: NoSchedule + + # operator watches for postgres objects in the given namespace + watched_namespace: "*" # listen to all namespaces + +# configure resource requests for the Postgres pods +configPostgresPodResources: + # CPU limits for the postgres containers + default_cpu_limit: "1" + # CPU request value for the postgres containers + default_cpu_request: 100m + # memory limits for the postgres containers + default_memory_limit: 500Mi + # memory request value for the postgres containers + default_memory_request: 100Mi + # optional upper boundary for CPU request + # max_cpu_request: "1" + + # optional upper boundary for memory request + # max_memory_request: 4Gi + + # hard CPU minimum required to properly run a Postgres cluster + min_cpu_limit: 250m + # hard memory minimum required to properly run a Postgres cluster + min_memory_limit: 250Mi + +# timeouts related to some operator actions +configTimeouts: + # interval between consecutive attempts of operator calling the Patroni API + patroni_api_check_interval: 1s + # timeout when waiting for successful response from Patroni API + patroni_api_check_timeout: 5s + # timeout when waiting for the Postgres pods to be deleted + pod_deletion_wait_timeout: 10m + # timeout when waiting for pod role and cluster labels + pod_label_wait_timeout: 10m + # interval between consecutive attempts waiting for postgresql CRD to be created + ready_wait_interval: 3s + # timeout for the complete postgres CRD creation + ready_wait_timeout: 30s + # interval to wait between consecutive attempts to check for some K8s resources + resource_check_interval: 3s + # timeout when waiting for the presence of a certain K8s resource (e.g. 
Sts, PDB) + resource_check_timeout: 10m + +# configure behavior of load balancers +configLoadBalancer: + # DNS zone for cluster DNS name when load balancer is configured for cluster + db_hosted_zone: db.example.com + # annotations to apply to service when load balancing is enabled + # custom_service_annotations: + # keyx: valuez + # keya: valuea + + # toggles service type load balancer pointing to the master pod of the cluster + enable_master_load_balancer: false + # toggles service type load balancer pointing to the master pooler pod of the cluster + enable_master_pooler_load_balancer: false + # toggles service type load balancer pointing to the replica pod of the cluster + enable_replica_load_balancer: false + # toggles service type load balancer pointing to the replica pooler pod of the cluster + enable_replica_pooler_load_balancer: false + # define external traffic policy for the load balancer + external_traffic_policy: "Cluster" + # defines the DNS name string template for the master load balancer cluster + master_dns_name_format: "{cluster}.{namespace}.{hostedzone}" + # deprecated DNS template for master load balancer using team name + master_legacy_dns_name_format: "{cluster}.{team}.{hostedzone}" + # defines the DNS name string template for the replica load balancer cluster + replica_dns_name_format: "{cluster}-repl.{namespace}.{hostedzone}" + # deprecated DNS template for replica load balancer using team name + replica_legacy_dns_name_format: "{cluster}-repl.{team}.{hostedzone}" + +# options to aid debugging of the operator itself +configDebug: + # toggles verbose debug logs from the operator + debug_logging: true + # toggles operator functionality that require access to the postgres database + enable_database_access: true + +# parameters affecting logging and REST API listener +configLoggingRestApi: + # REST API listener listens to this port + api_port: 8080 + # number of entries in the cluster history ring buffer + cluster_history_entries: 1000 + # number 
of lines in the ring buffer used to store cluster logs + ring_log_lines: 100 + +# configure interaction with non-Kubernetes objects from AWS or GCP +configAwsOrGcp: + # Additional Secret (aws or gcp credentials) to mount in the pod + # additional_secret_mount: "some-secret-name" + + # Path to mount the above Secret in the filesystem of the container(s) + # additional_secret_mount_path: "/some/dir" + + # AWS region used to store EBS volumes + aws_region: eu-central-1 + + # enable automatic migration on AWS from gp2 to gp3 volumes + enable_ebs_gp3_migration: false + # defines maximum volume size in GB until which auto migration happens + # enable_ebs_gp3_migration_max_size: 1000 + + # GCP credentials that will be used by the operator / pods + # gcp_credentials: "" + + # AWS IAM role to supply in the iam.amazonaws.com/role annotation of Postgres pods + # kube_iam_role: "" + + # S3 bucket to use for shipping postgres daily logs + # log_s3_bucket: "" + + # S3 bucket to use for shipping WAL segments with WAL-E + # wal_s3_bucket: "" + + # GCS bucket to use for shipping WAL segments with WAL-E + # wal_gs_bucket: "" + + # Azure Storage Account to use for shipping WAL segments with WAL-G + # wal_az_storage_account: "" + +# configure K8s cron job managed by the operator +configLogicalBackup: + # Azure Storage Account specs to store backup results + # logical_backup_azure_storage_account_name: "" + # logical_backup_azure_storage_container: "" + # logical_backup_azure_storage_account_key: "" + + # resources for logical backup pod, if empty configPostgresPodResources will be used + # logical_backup_cpu_limit: "" + # logical_backup_cpu_request: "" + # logical_backup_memory_limit: "" + # logical_backup_memory_request: "" + + # image for pods of the logical backup job (example runs pg_dumpall) + logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.12.2" + # path of google cloud service account json file + # logical_backup_google_application_credentials: 
"" + + # prefix for the backup job name + logical_backup_job_prefix: "logical-backup-" + # storage provider - either "s3", "gcs" or "az" + logical_backup_provider: "s3" + # S3 Access Key ID + logical_backup_s3_access_key_id: "" + # S3 bucket to store backup results + logical_backup_s3_bucket: "my-bucket-url" + # S3 bucket prefix to use + logical_backup_s3_bucket_prefix: "spilo" + # S3 region of bucket + logical_backup_s3_region: "" + # S3 endpoint url when not using AWS + logical_backup_s3_endpoint: "" + # S3 Secret Access Key + logical_backup_s3_secret_access_key: "" + # S3 server side encryption + logical_backup_s3_sse: "AES256" + # S3 retention time for stored backups for example "2 week" or "7 days" + logical_backup_s3_retention_time: "" + # backup schedule in the cron format + logical_backup_schedule: "30 00 * * *" + # secret to be used as reference for env variables in cronjob + logical_backup_cronjob_environment_secret: "" + +# automate creation of human users with teams API service +configTeamsApi: + # team_admin_role will have the rights to grant roles coming from PG manifests + enable_admin_role_for_users: true + # operator watches for PostgresTeam CRs to assign additional teams and members to clusters + enable_postgres_team_crd: false + # toogle to create additional superuser teams from PostgresTeam CRs + enable_postgres_team_crd_superusers: false + # toggle to automatically rename roles of former team members and deny LOGIN + enable_team_member_deprecation: false + # toggle to grant superuser to team members created from the Teams API + enable_team_superuser: false + # toggles usage of the Teams API by the operator + enable_teams_api: false + # should contain a URL to use for authentication (username and token) + # pam_configuration: https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees + + # operator will add all team member roles to this group and add a pg_hba line + pam_role_name: zalandos + # List of teams which members need 
the superuser role in each Postgres cluster + postgres_superuser_teams: + - postgres_superusers + # List of roles that cannot be overwritten by an application, team or infrastructure role + protected_role_names: + - admin + - cron_admin + # Suffix to add if members are removed from TeamsAPI or PostgresTeam CRD + role_deletion_suffix: "_deleted" + # role name to grant to team members created from the Teams API + team_admin_role: admin + # postgres config parameters to apply to each team member role + team_api_role_configuration: + log_statement: all + # URL of the Teams API service + # teams_api_url: http://fake-teams-api.default.svc.cluster.local + +# configure connection pooler deployment created by the operator +configConnectionPooler: + # db schema to install lookup function into + connection_pooler_schema: "pooler" + # db user for pooler to use + connection_pooler_user: "pooler" + # docker image + connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-32" + # max db connections the pooler should hold + connection_pooler_max_db_connections: 60 + # default pooling mode + connection_pooler_mode: "transaction" + # number of pooler instances + connection_pooler_number_of_instances: 2 + # default resources + connection_pooler_default_cpu_request: 500m + connection_pooler_default_memory_request: 100Mi + connection_pooler_default_cpu_limit: "1" + connection_pooler_default_memory_limit: 100Mi + +configPatroni: + # enable Patroni DCS failsafe_mode feature + enable_patroni_failsafe_mode: false + +# Zalando's internal CDC stream feature +enableStreams: false + +rbac: + # Specifies whether RBAC resources should be created + create: true + # Specifies whether ClusterRoles that are aggregated into the K8s default roles should be created. 
(https://kubernetes.io/docs/reference/access-authn-authz/rbac/#default-roles-and-role-bindings) + createAggregateClusterRoles: false + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podServiceAccount: + # The name of the ServiceAccount to be used by postgres cluster pods + # If not set a name is generated using the fullname template and "-pod" suffix + name: "postgres-pod" + +# priority class for operator pod +priorityClassName: "" + +# priority class for database pods +podPriorityClassName: + # If create is false with no name set, no podPriorityClassName is specified. + # Hence, the pod priorityClass is the one with globalDefault set. + # If there is no PriorityClass with globalDefault set, the priority of Pods with no priorityClassName is zero. + create: true + # If not set a name is generated using the fullname template and "-pod" suffix + name: "" + priority: 1000000 + +resources: + limits: + cpu: 500m + memory: 500Mi + requests: + cpu: 100m + memory: 250Mi + +securityContext: + runAsUser: 1000 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + +# Allow to setup operator Deployment readiness probe +readinessProbe: + initialDelaySeconds: 5 + periodSeconds: 10 + +# Affinity for pod assignment +# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} + +# Node labels for pod assignment +# Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} + +# Tolerations for pod assignment +# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +controllerID: + # Specifies whether a controller ID should be defined for the operator + # Note, all postgres manifest must then contain the following annotation to be found by this operator + # 
"acid.zalan.do/controller": + create: false + # The name of the controller ID to use. + # If not set and create is true, a name is generated using the fullname template + name: diff --git a/base-helm-configs/postgresql/postgresql-helm-overrides.yaml b/base-helm-configs/postgresql/postgresql-helm-overrides.yaml deleted file mode 100644 index e0f3685f..00000000 --- a/base-helm-configs/postgresql/postgresql-helm-overrides.yaml +++ /dev/null @@ -1,487 +0,0 @@ ---- -release_group: null - -pod: - security_context: - prometheus_postgresql_exporter: - pod: - runAsUser: 65534 - container: - postgresql_exporter: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - server: - pod: - runAsUser: 999 - # fsGroup used to allows cert file be witten to file. - fsGroup: 999 - container: - set_volume_perms: - runAsUser: 0 - readOnlyRootFilesystem: true - postgresql: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - postgresql_backup: - pod: - runAsUser: 65534 - container: - backup_perms: - runAsUser: 0 - readOnlyRootFilesystem: true - postgresql_backup: - runAsUser: 65534 - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - create_user: - pod: - runAsUser: 65534 - container: - prometheus_postgresql_exporter_create_user: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - affinity: - anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname - weight: - default: 10 - replicas: - # only 1 replica currently supported - server: 1 - prometheus_postgresql_exporter: 1 - lifecycle: - upgrades: - statefulsets: - pod_replacement_strategy: OnDelete - partition: 0 - deployments: - revision_history: 3 - pod_replacement_strategy: RollingUpdate - rolling_update: - max_unavailable: 1 - max_surge: 3 - termination_grace_period: - prometheus_postgresql_exporter: - timeout: 30 - server: - timeout: 180 - probes: - server: - postgresql: - liveness: - enabled: true - params: - 
initialDelaySeconds: 30 - timeoutSeconds: 5 - failureThreshold: 10 - readiness: - enabled: false - params: - initialDelaySeconds: 30 - timeoutSeconds: 5 - failureThreshold: 10 - resources: - enabled: true - server: - requests: - memory: "1024Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - test: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - prometheus_postgresql_exporter: - limits: - memory: "4096Mi" - cpu: "2000m" - requests: - memory: "128Mi" - cpu: "500m" - jobs: - image_repo_sync: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - prometheus_postgresql_exporter_create_user: - limits: - memory: "1024Mi" - cpu: "2000m" - requests: - memory: "128Mi" - cpu: "100m" - postgresql_backup: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - ks_user: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - -# using dockerhub postgresql: https://hub.docker.com/r/library/postgres/tags/ -images: - tags: - postgresql: "docker.io/library/postgres:14.5" - dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0 - image_repo_sync: docker.io/library/docker:17.07.0 - ks_user: docker.io/openstackhelm/heat:stein-ubuntu_bionic - prometheus_postgresql_exporter: docker.io/wrouesnel/postgres_exporter:v0.4.6 - prometheus_postgresql_exporter_create_user: "docker.io/library/postgres:14.5" - postgresql_backup: "quay.io/airshipit/porthole-postgresql-utility:latest-ubuntu_bionic" - pull_policy: "IfNotPresent" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync - -storage: - # Ensure PVC sizes are appropriate for the given environment - pvc: - enabled: true - size: 5Gi - class_name: general - class_path: volume.beta.kubernetes.io/storage-class - archive_pvc: - size: 5Gi - class_name: general - class_path: volume.beta.kubernetes.io/storage-class - host: - host_path: /data/openstack-helm/postgresql - 
mount: - path: /var/lib/postgresql - subpath: . - archive: - mount_path: /var/lib/archive - archive_limit: 60 - -labels: - server: - node_selector_key: openstack-control-plane - node_selector_value: enabled - test: - node_selectory_key: openstack-control-plane - node_selector_value: enabled - prometheus_postgresql_exporter: - node_selector_key: openstack-control-plane - node_selector_value: enabled - job: - node_selector_key: openstack-control-plane - node_selector_value: enabled - -dependencies: - dynamic: - common: - local_image_registry: - jobs: - - postgresql-image-repo-sync - services: - - endpoint: node - service: local_image_registry - static: - postgresql_backup: - jobs: - - postgresql-ks-user - services: - - endpoint: internal - service: postgresql - tests: - services: - - endpoint: internal - service: postgresql - image_repo_sync: - services: - - endpoint: internal - service: local_image_registry - prometheus_postgresql_exporter_create_user: - services: - - endpoint: internal - service: postgresql - prometheus_postgresql_exporter: - services: - - endpoint: internal - service: postgresql - jobs: - - prometheus-postgresql-exporter-create-user - -monitoring: - prometheus: - enabled: false - postgresql_exporter: - scrape: false - -volume: - backup: - enabled: true - class_name: general - size: 5Gi - -jobs: - postgresql_backup: - # activeDeadlineSeconds == 0 means no deadline - activeDeadlineSeconds: 0 - backoffLimit: 6 - cron: "15 0 * * *" - history: - success: 3 - failed: 1 - ks_user: - # activeDeadlineSeconds == 0 means no deadline - activeDeadlineSeconds: 0 - backoffLimit: 6 - -network_policy: - postgresql: - ingress: - - {} - egress: - - {} - -conf: - debug: false - pg_hba: | - host all all 127.0.0.1/32 trust - host all all 0.0.0.0/0 md5 - local all all trust - - postgresql: - archive_mode: 'on' - archive_command: 'test ! 
-f /var/lib/archive/%f && gzip < %p > /var/lib/archive/%f' - cluster_name: 'postgresql' - datestyle: 'iso, mdy' - external_pid_file: '/tmp/postgres.pid' - fsync: 'on' - listen_addresses: '0.0.0.0' - log_checkpoints: 'on' - log_connections: 'on' - log_disconnections: 'on' - log_line_prefix: 'postgresql: %t [%p]: [%l-1] %c %x %d %u %a %h %m ' - log_lock_waits: 'on' - log_temp_files: '0' - log_timezone: 'UTC' - max_connections: '1000' - max_locks_per_transaction: '64' - max_prepared_transactions: '0' - max_wal_senders: '16' - max_worker_processes: '10' - port: '5432' - shared_buffers: '2GB' - ssl: 'off' - ssl_cert_file: '/server_certs/tls.crt' - ssl_ca_file: '/server_certs/ca.crt' - ssl_key_file: '/server_certs/tls.key' - ssl_ciphers: 'TLSv1.2:!aNULL' - tcp_keepalives_idle: '900' - tcp_keepalives_interval: '100' - timezone: 'UTC' - track_commit_timestamp: 'on' - track_functions: 'all' - wal_keep_size: '256' - wal_level: 'hot_standby' - wal_log_hints: 'on' - hba_file: '/tmp/pg_hba.conf' - ident_file: '/tmp/pg_ident.conf' - backup: - enabled: true - base_path: /var/backup - days_to_keep: 3 - pg_dumpall_options: '--inserts --clean' - remote_backup: - enabled: true - container_name: postgresql - days_to_keep: 14 - storage_policy: default-placement - number_of_retries: 5 - delay_range: - min: 30 - max: 60 - throttle_backups: - enabled: false - sessions_limit: 480 - lock_expire_after: 7200 - retry_after: 3600 - container_name: throttle-backups-manager - - exporter: - queries: - pg_postmaster: - query: "SELECT pg_postmaster_start_time as start_time_seconds from pg_postmaster_start_time()" - master: true - metrics: - - start_time_seconds: - usage: "GAUGE" - description: "Time at which postmaster started" - -secrets: - oci_image_registry: - postgresql: postgresql-oci-image-registry-key - postgresql: - admin: postgresql-admin - exporter: postgresql-exporter - audit: postgresql-audit - backup_restore: postgresql-backup-restore - tls: - server: - internal: postgresql-tls-direct - 
identity: - admin: keystone-admin-user - postgresql: postgresql-backup-user - -endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null - port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: - enabled: false - postresql: - username: postresql - password: password - hosts: - default: localhost - host_fqdn_override: - default: null - port: - registry: - default: null - postgresql: - auth: - admin: - username: postgres - password: password - exporter: - username: psql_exporter - password: psql_exp_pass - audit: - username: audit - password: password - hosts: - default: postgresql - host_fqdn_override: - default: null - path: null - scheme: postgresql - port: - postgresql: - default: 5432 - internal: 5432 - service: 5432 - postgresql_restapi: - hosts: - default: postgresql-restapi - host_fqdn_override: - default: null - path: null - scheme: postgresql - port: - restapi: - default: 8008 - internal: 8008 - service: 8008 - prometheus_postgresql_exporter: - namespace: null - hosts: - default: postgresql-exporter - host_fqdn_override: - default: null - path: - default: /metrics - scheme: - default: 'http' - port: - metrics: - default: 9187 - identity: - name: backup-storage-auth - namespace: openstack - auth: - admin: - # Auth URL of null indicates local authentication - # HTK will form the URL unless specified here - auth_url: null - region_name: RegionOne - username: admin - password: password - project_name: admin - user_domain_name: default - project_domain_name: default - postgresql: - # Auth URL of null indicates local authentication - # HTK will form the URL unless specified here - auth_url: null - role: admin - region_name: RegionOne - username: postgresql-backup-user - password: password - project_name: service - 
user_domain_name: service - project_domain_name: service - hosts: - default: keystone - internal: keystone-api - host_fqdn_override: - default: null - path: - default: /v3 - scheme: - default: 'http' - port: - api: - default: 80 - internal: 5000 - -manifests: - certificates: false - configmap_bin: true - configmap_etc: true - job_image_repo_sync: true - network_policy: false - job_ks_user: true - secret_admin: true - secret_etc: true - secret_audit: true - secret_backup_restore: false - secret_registry: true - service: true - statefulset: true - cron_job_postgresql_backup: true - pvc_backup: true - monitoring: - prometheus: - configmap_bin: false - configmap_etc: false - deployment_exporter: false - job_user_create: false - secret_etc: false - service_exporter: false -... diff --git a/bin/install-libvirt.sh b/bin/install-libvirt.sh old mode 100644 new mode 100755 diff --git a/bin/install-memcached.sh b/bin/install-memcached.sh old mode 100644 new mode 100755 diff --git a/bin/install-postgres-operator.sh b/bin/install-postgres-operator.sh new file mode 100755 index 00000000..e392598e --- /dev/null +++ b/bin/install-postgres-operator.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +# Directory to check for YAML files +CONFIG_DIR="/etc/genestack/helm-configs/postgres-operator" + +pushd /opt/genestack/submodules/postgres-operator/charts || exit + +# Base helm command setup +HELM_CMD="helm upgrade --install postgres-operator ./postgres-operator \ + --namespace=postgres-system \ + --create-namespace \ + --timeout 120m" + +# Add the base overrides file +HELM_CMD+=" -f /opt/genestack/base-helm-configs/postgres-operator/postgres-operator-helm-overrides.yaml" + +# Check if YAML files exist in the specified directory +if compgen -G "${CONFIG_DIR}/*.yaml" > /dev/null; then + # Append all YAML files from the directory to the helm command + for yaml_file in "${CONFIG_DIR}"/*.yaml; do + HELM_CMD+=" -f ${yaml_file}" + done +fi + +# Run the helm command +eval "${HELM_CMD}" +echo "${HELM_CMD}" + 
+popd || exit diff --git a/bootstrap.sh b/bootstrap.sh index 77c89d2f..f876e3c3 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -110,7 +110,7 @@ done # Symlink /opt/genestack/kustomize.sh to # /etc/genestack/kustomize/kustomize.sh -ln -s $base_source_dir/base-kustomize/kustomize.sh $base_target_dir/kustomize/kustomize.sh +ln -s $base_source_dir/kustomize.sh $base_target_dir/kustomize.sh # Ensure kustomization.yaml exists in each # service base/overlay directory diff --git a/docs/infrastructure-postgresql.md b/docs/infrastructure-postgresql.md index e0534962..0eb72ab4 100644 --- a/docs/infrastructure-postgresql.md +++ b/docs/infrastructure-postgresql.md @@ -9,15 +9,11 @@ We are using the [Zalando postgres-operator](https://github. com/zalando/postgres-operator/) which offers easy to run and highly-available PostgreSQL clusters on Kubernetes. -_The following command to install the operator must be run twice, at least for -now, due to a race condition with the way kubectl handles the CRD -installation._ +!!! example "Run the postgres-operator deployment Script `bin/install-postgres-operator.sh`" -``` shell -kubectl kustomize --enable-helm /etc/genestack/kustomize/postgres-operator | kubectl apply -f - -sleep 10 -kubectl kustomize --enable-helm /etc/genestack/kustomize/postgres-operator | kubectl apply -f - -``` + ``` shell + --8<-- "bin/install-postgres-operator.sh" + ``` ## Create the PostgreSQL Cluster diff --git a/submodules/postgres-operator b/submodules/postgres-operator new file mode 160000 index 00000000..7c7aa969 --- /dev/null +++ b/submodules/postgres-operator @@ -0,0 +1 @@ +Subproject commit 7c7aa969350bef67d0283d93589499ae3d114edb