From 3c73c8ed8b42ce0671316a8756065a35e74916e3 Mon Sep 17 00:00:00 2001
From: Jie Yu
Date: Fri, 27 Mar 2020 13:07:28 -0700
Subject: [PATCH] [stable] velero-minio: fix the data loss issue (#200)

* velero: copy from previous revision

To prepare for upgrade.

(cherry picked from commit 29a4fdb8a16f5b3ff37d7105ce8a5ba8c5c27bd7)

* velero: fix the data loss issue

Velero charts 3.0.0 and 3.0.1 have a data loss issue. The reason is that
we didn't set the `mountPath` for the PV properly in minio. The default
mountPath is `/export` while the bucketRoot is at `/data`. This causes
velero-minio to start with a fresh disk from the container imagefs,
rather than from the PV.

However, after fixing the `mountPath`, we noticed that the minio server
won't start due to "invalid credentials". After researching a bit, we
found that minio requires the original access secret to decrypt the data
on the PV. As a result, we cannot use a delete and re-install policy,
because the minio credential secret (namely `minio-creds-secret`) created
by the init container has an owner reference to the velero deployment and
would be deleted along with it.

Previously, we annotated the Addon to use a delete and re-install policy
to avoid the helm upgrade error that the statefulset `minio` already
exists (created by the minio operator in earlier 2.x.y versions, and thus
not tracked by helm). We have to keep the same statefulset name because
the PVC name is derived from the statefulset name.

To solve the issue, we have to make the helm upgrade work. Thus, in this
patch, we install a `pre-install` job to delete objects created by the
minio operator in earlier 2.x.y versions, including the statefulset
`minio`. For future upgrades (upgrades from a helm-based minio), this
cleanup job should be a no-op.
---
 addons/velero/1.0.x/velero-4.yaml | 117 ++++++++++++++++++++++++++++++
 1 file changed, 117 insertions(+)
 create mode 100644 addons/velero/1.0.x/velero-4.yaml

diff --git a/addons/velero/1.0.x/velero-4.yaml b/addons/velero/1.0.x/velero-4.yaml
new file mode 100644
index 00000000..a7be426a
--- /dev/null
+++ b/addons/velero/1.0.x/velero-4.yaml
@@ -0,0 +1,117 @@
+---
+# ------------------------------------------------------------------------------
+# Velero
+#
+#
+# Velero is an open source backup and migration tool for Kubernetes.
+# See more about Velero at:
+#
+# * https://velero.io/
+# * https://github.com/heptio/velero
+# * https://github.com/helm/charts/tree/master/stable/velero
+#
+#
+# Implementation
+#
+#
+# Our implementation of Velero currently supports S3 backends for storage, and by default if no configuration overrides are
+# provided to point it at a backend other than the default, we will create and manage a distributed Minio (https://min.io/)
+# cluster which uses the default storage class for the cluster to maintain the backups.
+#
+#
+# WARNING: using the default (fallback) backend is for testing purposes only and should not be used in production.
+# ------------------------------------------------------------------------------
+apiVersion: kubeaddons.mesosphere.io/v1beta1
+kind: ClusterAddon
+metadata:
+  name: velero
+  labels:
+    kubeaddons.mesosphere.io/name: velero
+    # TODO: we're temporarily supporting dependency on an existing default storage class
+    # on the cluster, this hack will trigger re-queue on Addons until one exists.
+ kubeaddons.mesosphere.io/hack-requires-defaultstorageclass: "true" + annotations: + catalog.kubeaddons.mesosphere.io/addon-revision: "1.0.1-4" + values.chart.helm.kubeaddons.mesosphere.io/velero: "https://raw.githubusercontent.com/mesosphere/charts/5327e6a54fe70df550e894fd754541a4f71a9054/staging/velero/values.yaml" +spec: + namespace: velero + kubernetes: + minSupportedVersion: v1.15.6 + cloudProvider: + - name: aws + enabled: true + - name: azure + enabled: true + - name: gcp + enabled: true + - name: none + enabled: true + requires: + - matchLabels: + kubeaddons.mesosphere.io/provides: ingresscontroller + chartReference: + chart: velero + repo: https://mesosphere.github.io/charts/staging + version: 3.0.2 + values: | + --- + configuration: + provider: "aws" + backupStorageLocation: + name: "aws" + bucket: "velero" + config: + region: "fallback" # enables non-production fallback minio backend + s3ForcePathStyle: true # allows usage of fallback backend + s3Url: http://minio.velero.svc:9000 + volumeSnapshotLocation: + name: "aws" + config: + region: "fallback" + credentials: + secretContents: + cloud: "placeholder" + schedules: + default: + schedule: "0 0 * * *" + metrics: + enabled: true + service: + labels: + servicemonitor.kubeaddons.mesosphere.io/path: "metrics" + initContainers: + - name: initialize-velero + image: mesosphere/kubeaddons-addon-initializer:v0.2.5 + args: ["velero"] + env: + - name: "VELERO_MINIO_FALLBACK_SECRET_NAME" + value: "velero-kubeaddons" + minioBackend: true + minio: + mode: distributed + defaultBucket: + enabled: true + name: velero + bucketRoot: "/data" + mountPath: "/data" + existingSecret: minio-creds-secret + livenessProbe: + initialDelaySeconds: 120 + periodSeconds: 20 + resources: + requests: + memory: 256Mi + cpu: 250m + limits: + memory: 512Mi + cpu: 750m + persistence: + volumeTemplatePrefix: data + statefulSetNameOverride: minio + ingress: + enabled: true + hosts: + - "" + annotations: + kubernetes.io/ingress.class: traefik + traefik.ingress.kubernetes.io/frontend-entry-points: velero-minio
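
Note: the `pre-install` cleanup job described in the commit message lives in the
velero chart itself (version 3.0.2 referenced above), so it does not appear in this
addon manifest. As a rough, illustrative sketch only (the job name, image, and
service account below are assumptions, not the chart's actual manifest), a Helm
pre-install hook that removes the operator-created statefulset could look like:

apiVersion: batch/v1
kind: Job
metadata:
  name: minio-pre-install-cleanup   # illustrative name, not the chart's
  namespace: velero
  annotations:
    "helm.sh/hook": pre-install
    "helm.sh/hook-delete-policy": hook-succeeded
spec:
  template:
    spec:
      # Assumes a service account with RBAC permission to delete statefulsets
      # in the velero namespace.
      serviceAccountName: minio-cleanup
      restartPolicy: Never
      containers:
        - name: cleanup
          image: bitnami/kubectl:1.15   # any image with kubectl works
          command:
            - /bin/sh
            - -c
            # Remove the statefulset created by the minio operator in earlier
            # 2.x.y versions; PVCs from its volumeClaimTemplates are retained,
            # so the helm-managed statefulset named `minio` can reattach the
            # existing data. With --ignore-not-found this is a no-op when
            # upgrading from a helm-based minio.
            - kubectl delete statefulset minio -n velero --ignore-not-found

The hook runs before helm creates any chart resources, which is what allows the
subsequent `helm upgrade`/install to take ownership of a statefulset with the same
name without hitting the "already exists" error.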