This repository has been archived by the owner on Sep 15, 2023. It is now read-only.

Commit

Merge pull request #60 from pacoxu/dev-0.0.4
Dev 0.0.4
pacoxu authored Jun 6, 2022
2 parents c7c911b + 86491aa commit d3fd73b
Showing 4 changed files with 67 additions and 60 deletions.
11 changes: 9 additions & 2 deletions commands/upgrade_kubeadm.go
@@ -21,10 +21,12 @@ import (
"net/http"
"runtime"
"strings"
"time"

"github.com/go-logr/logr"
"github.com/pkg/errors"

"k8s.io/apimachinery/pkg/util/wait"
operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
)

@@ -58,8 +60,13 @@ func runUpgradeKubeadm(spec *operatorv1.UpgradeKubeadmCommandSpec, log logr.Logg
if spec.Local {
return nil
}

err := DownloadFromOfficialWebsite(spec.KubernetesVersion, "kubeadm", "/usr/bin/kubeadm-"+spec.KubernetesVersion, log)
err := wait.Poll(100*time.Millisecond, 300*time.Second, func() (bool, error) {
if err := DownloadFromOfficialWebsite(spec.KubernetesVersion, "kubeadm", "/usr/bin/kubeadm-"+spec.KubernetesVersion, log); err != nil {
log.Error(err, "Failed to download kubectl and kubelet")
return false, nil
}
return true, nil
})
if err != nil {
return err
}
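For context, the change above wraps the kubeadm download in a retry loop using wait.Poll from k8s.io/apimachinery/pkg/util/wait: the condition runs every 100ms for up to 300s, and a failed download returns (false, nil) so the poll retries instead of aborting. A minimal standalone sketch of that pattern (the download() helper and messages below are illustrative, not the operator's code):

```go
// Minimal sketch of the retry pattern introduced above, assuming only the
// standard wait package; download() stands in for DownloadFromOfficialWebsite.
package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// download is a placeholder that always fails, to show the retry path.
func download() error { return errors.New("transient network error") }

func main() {
	// Poll runs the condition every 100ms until it returns true,
	// returns an error, or the 300s timeout expires.
	err := wait.Poll(100*time.Millisecond, 300*time.Second, func() (bool, error) {
		if err := download(); err != nil {
			fmt.Println("download failed, retrying:", err)
			return false, nil // (false, nil) tells Poll to try again
		}
		return true, nil // done
	})
	if err != nil {
		fmt.Println("gave up:", err) // wait.ErrWaitTimeout after 300s
	}
}
```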
20 changes: 16 additions & 4 deletions commands/upgrade_kubectlkubelet.go
@@ -53,18 +53,30 @@ func runUpgradeKubectlAndKubelet(spec *operatorv1.UpgradeKubeletAndKubeactlComma
}
log.Info(fmt.Sprintf("%s", strings.Join(start, "\n")))

// systemctl cannot run inside pod.
err = DownloadFromOfficialWebsite(spec.KubernetesVersion, "kubelet", "/usr/bin/kubelet-"+spec.KubernetesVersion, log)
err = wait.Poll(100*time.Millisecond, 300*time.Second, func() (bool, error) {
if err := DownloadFromOfficialWebsite(spec.KubernetesVersion, "kubelet", "/usr/bin/kubelet-"+spec.KubernetesVersion, log); err != nil {
log.Error(err, "Failed to download kubectl and kubelet")
return false, nil
}
return true, nil
})
if err != nil {
return err
}

// TODO stop kubelet and replace it
cmd = newCmd("/usr/bin/cp", "-f", "/usr/bin/kubelet-"+spec.KubernetesVersion, "/usr/bin/kubelet")
start, err = cmd.RunAndCapture()
if err != nil {
return errors.WithStack(errors.WithMessage(err, strings.Join(start, "\n")))
// skip kubelet replacement
cmd = newCmd("/usr/bin/cp", "-f", "/usr/bin/kubelet-"+spec.KubernetesVersion, "/usr/bin/kubelet-new")
start, err = cmd.RunAndCapture()
if err != nil {
}
// return errors.WithStack(errors.WithMessage(err, strings.Join(start, "\n")))
} else {
log.Info(fmt.Sprintf("%s", strings.Join(start, "\n")))
}
log.Info(fmt.Sprintf("%s", strings.Join(start, "\n")))

return nil
}
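The change above stages the downloaded kubelet as /usr/bin/kubelet-new rather than overwriting /usr/bin/kubelet, because the agent runs in a pod and cannot restart the kubelet service with systemctl; the old copy-and-return-error path is commented out. A hedged sketch of the staging step using plain os/exec (paths and the version string are illustrative; the operator itself shells out through its own newCmd/RunAndCapture helper):

```go
// Hedged sketch of the staging step: copy the downloaded kubelet to
// /usr/bin/kubelet-new instead of overwriting /usr/bin/kubelet in place.
package main

import (
	"log"
	"os/exec"
)

func main() {
	version := "v1.24.1" // illustrative target version
	src := "/usr/bin/kubelet-" + version
	dst := "/usr/bin/kubelet-new" // swapped in on the host out of band, not by the pod

	out, err := exec.Command("/usr/bin/cp", "-f", src, dst).CombinedOutput()
	if err != nil {
		log.Printf("staging kubelet failed: %v\n%s", err, out)
		return
	}
	log.Printf("staged %s as %s", src, dst)
}
```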
59 changes: 22 additions & 37 deletions controllers/util.go
@@ -146,10 +146,11 @@ func createDaemonSet(c client.Client, operation *operatorv1.Operation, namespace
},
},
Resources: corev1.ResourceRequirements{
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("100m"),
corev1.ResourceMemory: resource.MustParse("30Mi"),
},
// TODO set a suitable limit for agent
// Limits: corev1.ResourceList{
// corev1.ResourceCPU: resource.MustParse("100m"),
// corev1.ResourceMemory: resource.MustParse("30Mi"),
// },
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("100m"),
corev1.ResourceMemory: resource.MustParse("20Mi"),
@@ -169,6 +170,10 @@ func createDaemonSet(c client.Client, operation *operatorv1.Operation, namespace
Name: "kubelet-binary",
MountPath: "/usr/bin/kubelet",
},
{
Name: "kubelet-new-binary",
MountPath: "/usr/bin/kubelet-new",
},
{
Name: "kubectl-binary",
MountPath: "/usr/bin/kubectl",
@@ -204,20 +209,9 @@ func createDaemonSet(c client.Client, operation *operatorv1.Operation, namespace
Name: "etcd-data-dir",
MountPath: "/var/lib/etcd",
},
// below are used to run `systemctl restart kubelet`
// sudo -it ubuntu:16.04 systemctl

{
Name: "run-systemd",
MountPath: "/run/systemd",
},
{
Name: "system-bus",
MountPath: "/var/run/dbus/system_bus_socket",
},
{
Name: "fs-cgroup",
MountPath: "/sys/fs/cgroup",
Name: "var-run",
MountPath: "/var/run/",
},
},
},
@@ -243,6 +237,15 @@ func createDaemonSet(c client.Client, operation *operatorv1.Operation, namespace
},
},
},
{
Name: "kubelet-new-binary",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/usr/bin/kubelet-new",
Type: hostPathTypePtr(corev1.HostPathFileOrCreate),
},
},
},
{
Name: "kubeadm-binary",
VolumeSource: corev1.VolumeSource{
@@ -307,28 +310,10 @@ func createDaemonSet(c client.Client, operation *operatorv1.Operation, namespace
},
},
{
Name: "run-systemd",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/run/systemd",
Type: hostPathTypePtr(corev1.HostPathDirectory),
},
},
},
{
Name: "system-bus",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/var/run/dbus/system_bus_socket",
Type: hostPathTypePtr(corev1.HostPathSocket),
},
},
},
{
Name: "fs-cgroup",
Name: "var-run",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/sys/fs/cgroup",
Path: "/var/run/",
Type: hostPathTypePtr(corev1.HostPathDirectory),
},
},
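The DaemonSet change above adds a hostPath volume and mount for /usr/bin/kubelet-new (type FileOrCreate, so the file is created on the host if missing), comments out the agent's CPU/memory limits pending a suitable value, and replaces the systemd/dbus/cgroup mounts with a single /var/run mount. A minimal sketch of the new volume and mount objects, assuming the same client-go corev1 types used in the file (the surrounding DaemonSet spec is omitted):

```go
// Sketch of the volume/mount pair added for the staged kubelet binary.
// Only these two objects are shown; the rest of the DaemonSet spec is omitted.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func hostPathTypePtr(t corev1.HostPathType) *corev1.HostPathType { return &t }

func main() {
	vol := corev1.Volume{
		Name: "kubelet-new-binary",
		VolumeSource: corev1.VolumeSource{
			HostPath: &corev1.HostPathVolumeSource{
				Path: "/usr/bin/kubelet-new",
				// FileOrCreate: an empty file is created on the host if none exists,
				// so the mount succeeds even before the first upgrade runs.
				Type: hostPathTypePtr(corev1.HostPathFileOrCreate),
			},
		},
	}
	mount := corev1.VolumeMount{
		Name:      "kubelet-new-binary",
		MountPath: "/usr/bin/kubelet-new",
	}
	fmt.Printf("volume: %+v\nmount: %+v\n", vol, mount)
}
```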
37 changes: 20 additions & 17 deletions operations/upgrade.go
@@ -28,6 +28,7 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)

@@ -36,6 +37,8 @@ func setupUpgrade() map[string]string {
}

func planUpgrade(operation *operatorv1.Operation, spec *operatorv1.UpgradeOperationSpec, c client.Client) *operatorv1.RuntimeTaskGroupList {
log := ctrl.Log.WithName("operations").WithName("Upgrade").WithValues("task", operation.Name)

// TODO support upgrade to v1.n-1~v1.n of current kubernetes server version.
// If the current kubernetes server version is v1.n-2 which is below the target version, we need to generate a further upgrade plan

@@ -77,15 +80,15 @@ func planUpgrade(operation *operatorv1.Operation, spec *operatorv1.UpgradeOperat
},
},
operatorv1.CommandDescriptor{
KubeadmUpgradeApply: &operatorv1.KubeadmUpgradeApplyCommandSpec{
DryRun: dryRun,
UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{
KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
Local: operation.Spec.Upgrade.Local,
},
},
operatorv1.CommandDescriptor{
UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{
KubeadmUpgradeApply: &operatorv1.KubeadmUpgradeApplyCommandSpec{
DryRun: dryRun,
KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
Local: operation.Spec.Upgrade.Local,
},
},
)
@@ -98,10 +101,10 @@ func planUpgrade(operation *operatorv1.Operation, spec *operatorv1.UpgradeOperat
setCPSelector(&t2)
cpNodes, err := listNodesBySelector(c, &t2.Spec.NodeSelector)
if err != nil {
fmt.Printf("failed to list nodes: %v", err)
log.Info("failed to list nodes.", "error", err)
return nil
}
if cpNodes.Size() > 1 {
if len(cpNodes.Items) > 1 {

t2.Spec.Template.Spec.Commands = append(t2.Spec.Template.Spec.Commands,
operatorv1.CommandDescriptor{
@@ -110,17 +113,17 @@ func planUpgrade(operation *operatorv1.Operation, spec *operatorv1.UpgradeOperat
Local: operation.Spec.Upgrade.Local,
},
},
operatorv1.CommandDescriptor{
KubeadmUpgradeNode: &operatorv1.KubeadmUpgradeNodeCommandSpec{
DryRun: operatorv1.OperationExecutionMode(operation.Spec.ExecutionMode) == operatorv1.OperationExecutionModeDryRun,
},
},
operatorv1.CommandDescriptor{
UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{
KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
Local: operation.Spec.Upgrade.Local,
},
},
operatorv1.CommandDescriptor{
KubeadmUpgradeNode: &operatorv1.KubeadmUpgradeNodeCommandSpec{
DryRun: operatorv1.OperationExecutionMode(operation.Spec.ExecutionMode) == operatorv1.OperationExecutionModeDryRun,
},
},
)
items = append(items, t2)
}
@@ -134,7 +137,7 @@ func planUpgrade(operation *operatorv1.Operation, spec *operatorv1.UpgradeOperat
fmt.Printf("failed to list nodes: %v", err)
return nil
}
if workerNodes.Size() > 0 {
if len(workerNodes.Items) > 0 {
t3.Spec.Template.Spec.Commands = append(t3.Spec.Template.Spec.Commands,
operatorv1.CommandDescriptor{
KubectlDrain: &operatorv1.KubectlDrainCommandSpec{},
@@ -145,17 +148,17 @@ func planUpgrade(operation *operatorv1.Operation, spec *operatorv1.UpgradeOperat
Local: operation.Spec.Upgrade.Local,
},
},
operatorv1.CommandDescriptor{
KubeadmUpgradeNode: &operatorv1.KubeadmUpgradeNodeCommandSpec{
DryRun: operatorv1.OperationExecutionMode(operation.Spec.ExecutionMode) == operatorv1.OperationExecutionModeDryRun,
},
},
operatorv1.CommandDescriptor{
UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{
KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
Local: operation.Spec.Upgrade.Local,
},
},
operatorv1.CommandDescriptor{
KubeadmUpgradeNode: &operatorv1.KubeadmUpgradeNodeCommandSpec{
DryRun: operatorv1.OperationExecutionMode(operation.Spec.ExecutionMode) == operatorv1.OperationExecutionModeDryRun,
},
},
operatorv1.CommandDescriptor{
KubectlUncordon: &operatorv1.KubectlUncordonCommandSpec{},
},
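The planner change above reorders the per-node commands so kubelet and kubectl are upgraded before kubeadm upgrade apply/node runs, switches the node-count checks from NodeList.Size() to len(Items), and routes the control-plane listing error through a controller-runtime logger instead of fmt.Printf. The Size() fix matters because the generated protobuf Size() method returns the marshalled byte size, not the number of nodes; a small sketch (the locally built list is purely for illustration):

```go
// Sketch of the list-length fix: NodeList.Size() is the generated protobuf
// marshal size in bytes, not a node count, so counting nodes must use
// len(list.Items).
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	list := corev1.NodeList{Items: make([]corev1.Node, 1)} // one empty node
	fmt.Println("len(list.Items):", len(list.Items))       // 1 — the actual node count
	fmt.Println("list.Size():    ", list.Size())           // marshalled byte size, not 1
}
```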
