diff --git a/Dockerfile b/Dockerfile
index 544122e..4f09f9b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -24,7 +24,17 @@ RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager
 # Refer to https://github.com/GoogleContainerTools/distroless for more details
 # distroless cannot run `kubeadm upgrade apply` smoothly
 # FROM gcr.m.daocloud.io/distroless/static:nonroot
-FROM docker.m.daocloud.io/centos
+FROM docker.m.daocloud.io/ubuntu
+RUN apt-get update -q -y && apt-get install -q -y curl systemd && apt-get clean
+RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done);
+RUN rm -rf /lib/systemd/system/multi-user.target.wants/*;
+RUN rm -rf /etc/systemd/system/*.wants/*;
+RUN rm -rf /lib/systemd/system/local-fs.target.wants/*;
+RUN rm -rf /lib/systemd/system/sockets.target.wants/*udev*;
+RUN rm -rf /lib/systemd/system/sockets.target.wants/*initctl*;
+RUN rm -rf /lib/systemd/system/basic.target.wants/*;
+RUN rm -rf /lib/systemd/system/anaconda.target.wants/*;
+
 WORKDIR /
 COPY --from=builder /workspace/manager .
 # USER nonroot:nonroot
diff --git a/commands/kubeadm_upgrade_apply.go b/commands/kubeadm_upgrade_apply.go
index 15270f8..7ac407b 100644
--- a/commands/kubeadm_upgrade_apply.go
+++ b/commands/kubeadm_upgrade_apply.go
@@ -32,7 +32,7 @@ func runKubeadmUpgradeApply(spec *operatorv1.KubeadmUpgradeApplyCommandSpec, log
 	// TODO: add real dry run support
 	cmd = newCmd("kubeadm", "upgrade", "apply", spec.KubernetesVersion, "--yes", "--v=4")
 	if spec.DryRun {
-		cmd = newCmd("kubeadm", "upgrade", "apply", spec.KubernetesVersion, "--yes", "--dry-run", "--v=4")
+		cmd = newCmd("kubeadm", "upgrade", "apply", spec.KubernetesVersion, "--yes", "--dry-run", "--v=5")
 	}
 
 	lines, err := cmd.RunAndCapture()
diff --git a/commands/kubeadm_upgrade_node.go b/commands/kubeadm_upgrade_node.go
index 3f4add8..40e7d60 100644
--- a/commands/kubeadm_upgrade_node.go
+++ b/commands/kubeadm_upgrade_node.go
@@ -31,9 +31,9 @@ func runKubeadmUpgradeNode(spec *operatorv1.KubeadmUpgradeNodeCommandSpec, log l
 	var cmd *cmd
 
 	// TODO: add real dry run support
-	cmd = newCmd("kubeadm", "upgrade", "node", "--v=4")
+	cmd = newCmd("kubeadm", "upgrade", "node", "--v=5")
 	if spec.DryRun {
-		cmd = newCmd("kubeadm", "upgrade", "node", "--dry-run", "--v=4")
+		cmd = newCmd("kubeadm", "upgrade", "node", "--dry-run", "--v=5")
 	}
 
 	lines, err := cmd.RunAndCapture()
diff --git a/commands/upgrade_kubeadm.go b/commands/upgrade_kubeadm.go
index 3a954c3..421c168 100644
--- a/commands/upgrade_kubeadm.go
+++ b/commands/upgrade_kubeadm.go
@@ -18,6 +18,7 @@ package commands
 
 import (
 	"fmt"
+	"net/http"
 	"runtime"
 	"strings"
 
@@ -30,26 +31,61 @@ import (
 // sudo curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/${ARCH}/{kubeadm,kubelet,kubectl}
 const DownloadURLTemplate = "https://storage.googleapis.com/kubernetes-release/release/%s/bin/linux/%s/%s"
 
+// download URL template for servers in China that cannot access googleapis.com
+const BackupDownloadURLTemplate = "http://dao-get.daocloud.io/kubernetes-release/release/%s/bin/linux/%s/%s"
+
+func GetDownloadURLTemplate() string {
+	if canAccessGoogleapis() {
+		return DownloadURLTemplate
+	}
+	return BackupDownloadURLTemplate
+}
+
+func canAccessGoogleapis() bool {
+	// probe a URL on googleapis.com to check whether it is reachable
+	resp, err := http.Get("https://storage.googleapis.com/kubernetes-release/release/v1.24.0/bin/linux/amd64/kubectl")
+	if err != nil {
+		fmt.Println(err.Error())
+		return false
+	}
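+	// the probe fetches a real release artifact, but only reachability
+	// matters here; the response body is closed unread below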
+	defer resp.Body.Close()
+	return true
+}
+
 // runUpgradeKubeadm will try to download the binary from official websites;
 func runUpgradeKubeadm(spec *operatorv1.UpgradeKubeadmCommandSpec, log logr.Logger) error {
 	if spec.Local {
 		return nil
 	}
 
-	return DownloadFromOfficialWebsite(spec.KubernetesVersion, "kubeadm", log)
+	err := DownloadFromOfficialWebsite(spec.KubernetesVersion, "kubeadm", "/usr/bin/kubeadm-"+spec.KubernetesVersion, log)
+	if err != nil {
+		return err
+	}
+
+	cmd := newCmd("/usr/bin/cp", "-f", "/usr/bin/kubeadm-"+spec.KubernetesVersion, "/usr/bin/kubeadm")
+	start, err := cmd.RunAndCapture()
+	if err != nil {
+		return errors.WithStack(errors.WithMessage(err, strings.Join(start, "\n")))
+	}
+	log.Info(fmt.Sprintf("%s", strings.Join(start, "\n")))
+
+	return nil
 }
 
-func DownloadFromOfficialWebsite(version, bin string, log logr.Logger) error {
+func DownloadFromOfficialWebsite(version, bin, targetPath string, log logr.Logger) error {
 	var cmd *cmd
 
-	cmd = newCmd("curl", "-L", "--remote-name-all", fmt.Sprintf(DownloadURLTemplate, version, runtime.GOARCH, bin), "-o", "/usr/bin/"+bin)
+	cmd = newCmd("curl", "-L", "--remote-name-all", fmt.Sprintf(GetDownloadURLTemplate(), version, runtime.GOARCH, bin), "-o", targetPath)
+	log.Info("download", "command", cmd.command, "args", strings.Join(cmd.args, " "))
 	donwlod, err := cmd.RunAndCapture()
 	if err != nil {
 		return errors.WithStack(errors.WithMessage(err, strings.Join(donwlod, "\n")))
 	}
 	log.Info(fmt.Sprintf("%s", strings.Join(donwlod, "\n")))
 
-	cmd = newCmd("chmod", "+x", "/usr/bin/"+bin)
+	cmd = newCmd("chmod", "+x", targetPath)
 	lines, err := cmd.RunAndCapture()
 	if err != nil {
 		return errors.WithStack(errors.WithMessage(err, strings.Join(lines, "\n")))
diff --git a/commands/upgrade_kubectlkubelet.go b/commands/upgrade_kubectlkubelet.go
index 3b3c336..c101b4a 100644
--- a/commands/upgrade_kubectlkubelet.go
+++ b/commands/upgrade_kubectlkubelet.go
@@ -19,10 +19,12 @@ package commands
 import (
 	"fmt"
 	"strings"
+	"time"
 
 	"github.com/go-logr/logr"
 	"github.com/pkg/errors"
 
+	"k8s.io/apimachinery/pkg/util/wait"
 	operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
 )
 
@@ -32,27 +34,33 @@ func runUpgradeKubectlAndKubelet(spec *operatorv1.UpgradeKubeletAndKubeactlComma
 	if spec.Local {
 		return nil
 	}
-	err := DownloadFromOfficialWebsite(spec.KubernetesVersion, "kubectl", log)
+
+	err := wait.Poll(100*time.Millisecond, 300*time.Second, func() (bool, error) {
+		if err := DownloadFromOfficialWebsite(spec.KubernetesVersion, "kubectl", "/usr/bin/kubectl-"+spec.KubernetesVersion, log); err != nil {
+			log.Error(err, "Failed to download kubectl")
+			return false, nil
+		}
+		return true, nil
+	})
 	if err != nil {
-		return err
+		// a kubectl upgrade failure is not critical, so log it and continue
+		log.Info("kubectl upgrade failed; it is not critical, skipping")
 	}
-
-	var cmd *cmd
-
-	cmd = newCmd("systemctl", "stop", "kubelet")
-	stop, err := cmd.RunAndCapture()
+	cmd := newCmd("/usr/bin/cp", "-f", "/usr/bin/kubectl-"+spec.KubernetesVersion, "/usr/bin/kubectl")
+	start, err := cmd.RunAndCapture()
 	if err != nil {
-		return errors.WithStack(errors.WithMessage(err, strings.Join(stop, "\n")))
+		return errors.WithStack(errors.WithMessage(err, strings.Join(start, "\n")))
 	}
-	log.Info(fmt.Sprintf("%s", strings.Join(stop, "\n")))
+	log.Info(fmt.Sprintf("%s", strings.Join(start, "\n")))
 
-	err = DownloadFromOfficialWebsite(spec.KubernetesVersion, "kubelet", log)
+	// systemctl cannot run inside the pod, so the kubelet service is not
+	// stopped here; the new binary is swapped in place instead.
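+	// Restarting kubelet is expected to be handled through the host's systemd:
+	// the agent DaemonSet mounts /run/systemd, the D-Bus socket, and the cgroup
+	// filesystem for that purpose (see controllers/util.go).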
+	err = DownloadFromOfficialWebsite(spec.KubernetesVersion, "kubelet", "/usr/bin/kubelet-"+spec.KubernetesVersion, log)
 	if err != nil {
 		return err
 	}
 
-	cmd = newCmd("systemctl", "start", "kubelet")
-	start, err := cmd.RunAndCapture()
+	cmd = newCmd("/usr/bin/cp", "-f", "/usr/bin/kubelet-"+spec.KubernetesVersion, "/usr/bin/kubelet")
+	start, err = cmd.RunAndCapture()
 	if err != nil {
 		return errors.WithStack(errors.WithMessage(err, strings.Join(start, "\n")))
 	}
diff --git a/controllers/operation_controller.go b/controllers/operation_controller.go
index fecbc94..9433bb4 100644
--- a/controllers/operation_controller.go
+++ b/controllers/operation_controller.go
@@ -221,7 +221,7 @@ func (r *OperationReconciler) reconcileLabels(operation *operatorv1.Operation) {
 func (r *OperationReconciler) reconcileTaskGroups(operation *operatorv1.Operation, log logr.Logger) (*taskGroupReconcileList, error) {
 	// gets all the desired TaskGroup objects for the current operation
 	// Nb. this is the domain knowledge encoded into operation implementations
-	desired, err := operations.TaskGroupList(operation)
+	desired, err := operations.TaskGroupList(operation, r.Client)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to get desired TaskGroup list")
 	}
diff --git a/controllers/util.go b/controllers/util.go
index bfa0bee..b8b2857 100644
--- a/controllers/util.go
+++ b/controllers/util.go
@@ -166,15 +166,12 @@ func createDaemonSet(c client.Client, operation *operatorv1.Operation, namespace
 							MountPath: "/usr/bin/kubeadm",
 						},
 						{
-							Name:      "etc-kubernetes",
-							MountPath: "/etc/kubernetes",
+							Name:      "kubelet-binary",
+							MountPath: "/usr/bin/kubelet",
 						},
-						// TODO: use a different volume for the certificates when kubeadm upgrade apply.
-						// "kubeadm upgrade node" may use different dirs
-						// var-lib-kubelet-pki is used by kubeadm upgrade apply to store the certificates
 						{
-							Name:      "var-lib-kubelet-pki",
-							MountPath: "/var/lib/kubelet/pki",
+							Name:      "kubectl-binary",
+							MountPath: "/usr/bin/kubectl",
 						},
 						// crictl is used by kubeadm upgrade apply to check the binary like `crictl`
 						{
@@ -186,6 +183,17 @@ func createDaemonSet(c client.Client, operation *operatorv1.Operation, namespace
 							Name:      "cp",
 							MountPath: "/usr/bin/cp",
 						},
+						{
+							Name:      "etc-kubernetes",
+							MountPath: "/etc/kubernetes",
+						},
+						// TODO: use a different volume for the certificates when running `kubeadm upgrade apply`.
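+						// Note: this replaces the old var-lib-kubelet-pki mount, widened from
+						// /var/lib/kubelet/pki to all of /var/lib/kubelet so that
+						// kubeadm-flags.env is reachable as well.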
+ // "kubeadm upgrade node" may use different dirs + // var-lib-kubelet is used by kubeadm upgrade apply to store the certificates and /var/lib/kubelet/kubeadm-flags.env + { + Name: "var-lib-kubelet", + MountPath: "/var/lib/kubelet/", + }, // run is used to check container runtime status { Name: "run", @@ -196,12 +204,45 @@ func createDaemonSet(c client.Client, operation *operatorv1.Operation, namespace Name: "etcd-data-dir", MountPath: "/var/lib/etcd", }, + // below are used to run `systemctl restart kubelet` + // sudo -it ubuntu:16.04 systemctl + + { + Name: "run-systemd", + MountPath: "/run/systemd", + }, + { + Name: "system-bus", + MountPath: "/var/run/dbus/system_bus_socket", + }, + { + Name: "fs-cgroup", + MountPath: "/sys/fs/cgroup", + }, }, }, }, TerminationGracePeriodSeconds: pointer.Int64Ptr(10), HostNetwork: true, Volumes: []corev1.Volume{ + { + Name: "kubectl-binary", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/usr/bin/kubectl", + Type: hostPathTypePtr(corev1.HostPathFile), + }, + }, + }, + { + Name: "kubelet-binary", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/usr/bin/kubelet", + Type: hostPathTypePtr(corev1.HostPathFile), + }, + }, + }, { Name: "kubeadm-binary", VolumeSource: corev1.VolumeSource{ @@ -221,10 +262,10 @@ func createDaemonSet(c client.Client, operation *operatorv1.Operation, namespace }, }, { - Name: "var-lib-kubelet-pki", + Name: "var-lib-kubelet", VolumeSource: corev1.VolumeSource{ HostPath: &corev1.HostPathVolumeSource{ - Path: "/var/lib/kubelet/pki", + Path: "/var/lib/kubelet", Type: hostPathTypePtr(corev1.HostPathDirectory), }, }, @@ -234,7 +275,7 @@ func createDaemonSet(c client.Client, operation *operatorv1.Operation, namespace VolumeSource: corev1.VolumeSource{ HostPath: &corev1.HostPathVolumeSource{ Path: "/usr/local/bin/crictl", - Type: hostPathTypePtr(corev1.HostPathFile), + Type: hostPathTypePtr(corev1.HostPathFileOrCreate), }, }, }, @@ -265,6 +306,33 @@ func createDaemonSet(c client.Client, operation *operatorv1.Operation, namespace }, }, }, + { + Name: "run-systemd", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/run/systemd", + Type: hostPathTypePtr(corev1.HostPathDirectory), + }, + }, + }, + { + Name: "system-bus", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/var/run/dbus/system_bus_socket", + Type: hostPathTypePtr(corev1.HostPathSocket), + }, + }, + }, + { + Name: "fs-cgroup", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/sys/fs/cgroup", + Type: hostPathTypePtr(corev1.HostPathDirectory), + }, + }, + }, }, }, }, @@ -407,11 +475,11 @@ func getOwnerOperation(ctx context.Context, c client.Client, obj metav1.ObjectMe return nil, errors.Errorf("missing controller ref for %s/%s", obj.Namespace, obj.Name) } -type matchingSelector struct { +type MatchingSelector struct { selector labels.Selector } -func (m matchingSelector) ApplyToList(opts *client.ListOptions) { +func (m MatchingSelector) ApplyToList(opts *client.ListOptions) { opts.LabelSelector = m.selector } @@ -421,7 +489,7 @@ func listNodesBySelector(c client.Client, selector *metav1.LabelSelector) (*core return nil, errors.Wrap(err, "failed to convert TaskGroup.Spec.NodeSelector to a selector") } - o := matchingSelector{selector: s} + o := MatchingSelector{selector: s} nodes := &corev1.NodeList{} if err := c.List( diff --git a/operations/factory.go b/operations/factory.go index 
index 5d09e95..e2871cd 100644
--- a/operations/factory.go
+++ b/operations/factory.go
@@ -18,6 +18,7 @@ package operations
 
 import (
 	"github.com/pkg/errors"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
 )
@@ -40,13 +41,13 @@ func DaemonSetNodeSelectorLabels(operation *operatorv1.Operation) (map[string]st
 }
 
 // TaskGroupList return the list of TaskGroup to be performed by an operation
-func TaskGroupList(operation *operatorv1.Operation) (*operatorv1.RuntimeTaskGroupList, error) {
+func TaskGroupList(operation *operatorv1.Operation, c client.Client) (*operatorv1.RuntimeTaskGroupList, error) {
 	if operation.Spec.RenewCertificates != nil {
 		return planRenewCertificates(operation, operation.Spec.RenewCertificates), nil
 	}
 
 	if operation.Spec.Upgrade != nil {
-		return planUpgrade(operation, operation.Spec.Upgrade), nil
+		return planUpgrade(operation, operation.Spec.Upgrade, c), nil
 	}
 
 	if operation.Spec.CustomOperation != nil {
diff --git a/operations/upgrade.go b/operations/upgrade.go
index 69c4459..0a62175 100644
--- a/operations/upgrade.go
+++ b/operations/upgrade.go
@@ -17,101 +17,197 @@ limitations under the License.
 package operations
 
 import (
+	"context"
+	"fmt"
+	"os"
+
+	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
 	operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 func setupUpgrade() map[string]string {
 	return map[string]string{}
 }
 
-func planUpgrade(operation *operatorv1.Operation, spec *operatorv1.UpgradeOperationSpec) *operatorv1.RuntimeTaskGroupList {
+func planUpgrade(operation *operatorv1.Operation, spec *operatorv1.UpgradeOperationSpec, c client.Client) *operatorv1.RuntimeTaskGroupList {
+	// TODO: support any target within v1.(n-1)..v1.n of the current server
+	// version. When the current server version is v1.(n-2) or older, a chain of
+	// intermediate upgrade plans has to be generated, because kubeadm can only
+	// upgrade one minor version at a time.
+
 	var items []operatorv1.RuntimeTaskGroup
 	dryRun := operation.Spec.GetTypedOperationExecutionMode() == operatorv1.OperationExecutionModeDryRun
 
+	serverNeedUpgrade := true
+
+	serverVersion, err := getServerVersion()
+	if err != nil {
+		fmt.Printf("failed to get the server version: %v\n", err)
+		return nil
+	}
+	// TODO: is comparing the reported server version the right way to check
+	// whether `kubeadm upgrade apply` already succeeded? Checking
+	// ClusterConfiguration.kubernetesVersion in the kubeadm-config ConfigMap
+	// might be more reliable.
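+	// Both strings are compared verbatim: the discovery client reports the git
+	// version (e.g. "v1.23.1"), so spec.Upgrade.KubernetesVersion must use the
+	// same "v<major>.<minor>.<patch>" form for the skip to take effect.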
+	if operation.Spec.Upgrade.KubernetesVersion == serverVersion {
+		// skip `kubeadm upgrade apply` if the cluster already runs the target version
+		serverNeedUpgrade = false
+	}
+
+	// nodeNeedUpgrade := true
+	// nodeVersion, err := getNodeVersion(c, getNodeName())
+	// if err != nil {
+	// 	return nil
+	// }
+	// if operation.Spec.Upgrade.KubernetesVersion == nodeVersion {
+	// 	// skip upgrading the node if its kubelet already runs the target version
+	// 	nodeNeedUpgrade = false
+	// }
 
-	t1 := createUpgradeApplyTaskGroup(operation, "01", "upgrade-apply")
-	setCP1Selector(&t1)
-	// run `upgrade apply`` on the first node of all control plane
-	t1.Spec.NodeFilter = string(operatorv1.RuntimeTaskGroupNodeFilterHead)
-	t1.Spec.Template.Spec.Commands = append(t1.Spec.Template.Spec.Commands,
-		operatorv1.CommandDescriptor{
-			UpgradeKubeadm: &operatorv1.UpgradeKubeadmCommandSpec{
-				KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
-				Local:             operation.Spec.Upgrade.Local,
+	if serverNeedUpgrade {
+		t1 := createUpgradeApplyTaskGroup(operation, "01", "upgrade-apply")
+		setCP1Selector(&t1)
+		// run `upgrade apply` on the first node of the control plane
+		t1.Spec.NodeFilter = string(operatorv1.RuntimeTaskGroupNodeFilterHead)
+		t1.Spec.Template.Spec.Commands = append(t1.Spec.Template.Spec.Commands,
+			operatorv1.CommandDescriptor{
+				UpgradeKubeadm: &operatorv1.UpgradeKubeadmCommandSpec{
+					KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+					Local:             operation.Spec.Upgrade.Local,
+				},
 			},
-		},
-		operatorv1.CommandDescriptor{
-			KubeadmUpgradeApply: &operatorv1.KubeadmUpgradeApplyCommandSpec{
-				DryRun:            dryRun,
-				KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+			operatorv1.CommandDescriptor{
+				KubeadmUpgradeApply: &operatorv1.KubeadmUpgradeApplyCommandSpec{
+					DryRun:            dryRun,
+					KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+				},
 			},
-		},
-		operatorv1.CommandDescriptor{
-			UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{
-				KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
-				Local:             operation.Spec.Upgrade.Local,
+			operatorv1.CommandDescriptor{
+				UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{
+					KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+					Local:             operation.Spec.Upgrade.Local,
+				},
 			},
-		},
-	)
-	items = append(items, t1)
+		)
+		items = append(items, t1)
+	}
 
 	// this can be skipped if there is only one control-plane node.
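+	// With a single control-plane node, `kubeadm upgrade apply` above already
+	// covers the whole control plane, hence the node-count check below.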
 	// currently it depends on the selector
 	t2 := createBasicTaskGroup(operation, "02", "upgrade-cp")
-	setWSelector(&t2)
-	t2.Spec.Template.Spec.Commands = append(t2.Spec.Template.Spec.Commands,
-		operatorv1.CommandDescriptor{
-			UpgradeKubeadm: &operatorv1.UpgradeKubeadmCommandSpec{
-				KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
-				Local:             operation.Spec.Upgrade.Local,
+	setCPSelector(&t2)
+	cpNodes, err := listNodesBySelector(c, &t2.Spec.NodeSelector)
+	if err != nil {
+		fmt.Printf("failed to list nodes: %v\n", err)
+		return nil
+	}
+	if len(cpNodes.Items) > 1 {
+		t2.Spec.Template.Spec.Commands = append(t2.Spec.Template.Spec.Commands,
+			operatorv1.CommandDescriptor{
+				UpgradeKubeadm: &operatorv1.UpgradeKubeadmCommandSpec{
+					KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+					Local:             operation.Spec.Upgrade.Local,
+				},
 			},
-		},
-		operatorv1.CommandDescriptor{
-			KubeadmUpgradeNode: &operatorv1.KubeadmUpgradeNodeCommandSpec{
-				DryRun: operatorv1.OperationExecutionMode(operation.Spec.ExecutionMode) == operatorv1.OperationExecutionModeDryRun,
+			operatorv1.CommandDescriptor{
+				KubeadmUpgradeNode: &operatorv1.KubeadmUpgradeNodeCommandSpec{
+					DryRun: operatorv1.OperationExecutionMode(operation.Spec.ExecutionMode) == operatorv1.OperationExecutionModeDryRun,
+				},
 			},
-		},
-		operatorv1.CommandDescriptor{
-			UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{
-				KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
-				Local:             operation.Spec.Upgrade.Local,
+			operatorv1.CommandDescriptor{
+				UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{
+					KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+					Local:             operation.Spec.Upgrade.Local,
+				},
 			},
-		},
-	)
-	items = append(items, t2)
+		)
+		items = append(items, t2)
+	}
 
 	// this can be skipped if there are no worker nodes.
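+	// Likewise, the worker task group below is only queued when its selector
+	// matches at least one node, so clusters without dedicated workers skip it.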
 	// currently it depends on the selector
 	t3 := createBasicTaskGroup(operation, "02", "upgrade-w")
 	setWSelector(&t3)
-
-	t3.Spec.Template.Spec.Commands = append(t3.Spec.Template.Spec.Commands,
-		operatorv1.CommandDescriptor{
-			KubectlDrain: &operatorv1.KubectlDrainCommandSpec{},
-		},
-		operatorv1.CommandDescriptor{
-			UpgradeKubeadm: &operatorv1.UpgradeKubeadmCommandSpec{
-				KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
-				Local:             operation.Spec.Upgrade.Local,
+	workerNodes, err := listNodesBySelector(c, &t3.Spec.NodeSelector)
+	if err != nil {
+		fmt.Printf("failed to list nodes: %v\n", err)
+		return nil
+	}
+	if len(workerNodes.Items) > 0 {
+		t3.Spec.Template.Spec.Commands = append(t3.Spec.Template.Spec.Commands,
+			operatorv1.CommandDescriptor{
+				KubectlDrain: &operatorv1.KubectlDrainCommandSpec{},
 			},
-		},
-		operatorv1.CommandDescriptor{
-			KubeadmUpgradeNode: &operatorv1.KubeadmUpgradeNodeCommandSpec{
-				DryRun: operatorv1.OperationExecutionMode(operation.Spec.ExecutionMode) == operatorv1.OperationExecutionModeDryRun,
+			operatorv1.CommandDescriptor{
+				UpgradeKubeadm: &operatorv1.UpgradeKubeadmCommandSpec{
+					KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+					Local:             operation.Spec.Upgrade.Local,
+				},
 			},
-		},
-		operatorv1.CommandDescriptor{
-			UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{
-				KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
-				Local:             operation.Spec.Upgrade.Local,
+			operatorv1.CommandDescriptor{
+				KubeadmUpgradeNode: &operatorv1.KubeadmUpgradeNodeCommandSpec{
+					DryRun: operatorv1.OperationExecutionMode(operation.Spec.ExecutionMode) == operatorv1.OperationExecutionModeDryRun,
+				},
 			},
-		},
-		operatorv1.CommandDescriptor{
-			KubectlUncordon: &operatorv1.KubectlUncordonCommandSpec{},
-		},
-	)
-	items = append(items, t3)
+			operatorv1.CommandDescriptor{
+				UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{
+					KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+					Local:             operation.Spec.Upgrade.Local,
+				},
+			},
+			operatorv1.CommandDescriptor{
+				KubectlUncordon: &operatorv1.KubectlUncordonCommandSpec{},
+			},
+		)
+		items = append(items, t3)
+	}
 
 	return &operatorv1.RuntimeTaskGroupList{
 		Items: items,
 	}
 }
+
+// getServerVersion returns the current kubernetes server version
+func getServerVersion() (string, error) {
+	config, err := rest.InClusterConfig()
+	if err != nil {
+		return "", err
+	}
+	clusterclient, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		return "", fmt.Errorf("failed to create a cluster client: %w", err)
+	}
+	clusterversion, err := clusterclient.Discovery().ServerVersion()
+	if err != nil {
+		return "", fmt.Errorf("failed to get the server version: %w", err)
+	}
+	return clusterversion.String(), nil
+}
+
+func getNodeVersion(c client.Client, nodeName string) (string, error) {
+	node := &corev1.Node{}
+	err := c.Get(context.TODO(), types.NamespacedName{Name: nodeName}, node)
+	if err != nil {
+		return "", err
+	}
+	return node.Status.NodeInfo.KubeletVersion, nil
+}
+
+func listNodesBySelector(c client.Client, selector *metav1.LabelSelector) (*corev1.NodeList, error) {
+	s, err := metav1.LabelSelectorAsSelector(selector)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to convert TaskGroup.Spec.NodeSelector to a selector")
+	}
+
+	nodes := &corev1.NodeList{}
+	if err := c.List(
+		context.Background(), nodes,
+		client.MatchingLabelsSelector{Selector: s},
+	); err != nil {
+		return nil, err
+	}
+
+	return nodes, nil
+}
+
+func getNodeName() string {
+	return os.Getenv("MY_NODE_NAME")
+}
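+
+// Note: getNodeName assumes the agent pod injects the node name through the
+// downward API, e.g.:
+//
+//	env:
+//	- name: MY_NODE_NAME
+//	  valueFrom:
+//	    fieldRef:
+//	      fieldPath: spec.nodeName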