Fix sa token issue in 1.24 (#252)
* Fix sa token issue in 1.24

Signed-off-by: Jian Qiu <[email protected]>

* fix flaky e2e

Signed-off-by: Jian Qiu <[email protected]>

* Use patch for accept

Signed-off-by: Jian Qiu <[email protected]>

* Fix e2e test issue for upgrade

Signed-off-by: Jian Qiu <[email protected]>

* Refactor e2e

Signed-off-by: Jian Qiu <[email protected]>

Signed-off-by: Jian Qiu <[email protected]>
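
For context: Kubernetes 1.24 enables LegacyServiceAccountTokenNoAutoGeneration, so ServiceAccount token Secrets are no longer created automatically and tokens must be requested explicitly. A minimal sketch of fetching a short-lived token through the TokenRequest API with client-go (the namespace and service-account name are illustrative, not taken from this commit):

package main

import (
	"context"
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/utils/pointer"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// Ask the API server to mint a short-lived token instead of reading
	// an auto-generated Secret, which 1.24 no longer provides.
	tr, err := client.CoreV1().ServiceAccounts("open-cluster-management").CreateToken(
		context.TODO(),
		"cluster-bootstrap", // hypothetical service account
		&authenticationv1.TokenRequest{
			Spec: authenticationv1.TokenRequestSpec{
				ExpirationSeconds: pointer.Int64(3600),
			},
		},
		metav1.CreateOptions{},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(tr.Status.Token)
}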
qiujian16 authored Aug 22, 2022
1 parent b92405e commit f050413
Showing 25 changed files with 399 additions and 388 deletions.
23 changes: 0 additions & 23 deletions .github/workflows/e2e-tests.yaml

This file was deleted.

@@ -30,3 +30,16 @@ jobs:
         go-version: 1.17
     - name: Integration Tests
       run: make test-integration
+
+  e2e-test:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v2
+
+    - name: Set up Go
+      uses: actions/setup-go@v2
+      with:
+        go-version: 1.17
+
+    - name: E2E Tests
+      run: make test-e2e
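
The standalone e2e workflow above is deleted and its job is folded into the existing CI workflow as an e2e-test job next to the integration tests, so both suites run from a single workflow file.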
2 changes: 1 addition & 1 deletion go.mod
@@ -23,6 +23,7 @@ require (
 	k8s.io/component-base v0.24.3
 	k8s.io/klog/v2 v2.60.1
 	k8s.io/kubectl v0.24.3
+	k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
 	open-cluster-management.io/api v0.8.0
 	open-cluster-management.io/cluster-proxy v0.1.2
 	open-cluster-management.io/managed-serviceaccount v0.2.1-0.20220427065210-de6a7b7b5be8
@@ -117,7 +118,6 @@ require (
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
 	k8s.io/kube-aggregator v0.24.0 // indirect
-	k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
 	sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
 	sigs.k8s.io/kube-storage-version-migrator v0.0.5 // indirect
 	sigs.k8s.io/kustomize/api v0.11.4 // indirect
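Promoting k8s.io/utils from an indirect to a direct requirement is what go mod tidy produces once a package in the module imports it directly; the change presumably mirrors new direct usage of its helpers introduced by this commit.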
19 changes: 10 additions & 9 deletions pkg/cmd/accept/exec.go
@@ -10,6 +10,7 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
@@ -99,7 +100,7 @@ func (o *Options) accept(kubeClient *kubernetes.Clientset, clusterClient *cluste
 		return false, err
 	}
 	if csrApproved && mcUpdated {
-		fmt.Printf("\n Your managed cluster %s has joined the Hub successfully. Visit https://open-cluster-management.io/scenarios or https://github.com/open-cluster-management-io/OCM/tree/main/solutions for next steps.\n", clusterName)
+		fmt.Fprintf(o.Streams.Out, "\n Your managed cluster %s has joined the Hub successfully. Visit https://open-cluster-management.io/scenarios or https://github.com/open-cluster-management-io/OCM/tree/main/solutions for next steps.\n", clusterName)
 		return true, nil
 	}
 	return false, nil
@@ -141,12 +142,12 @@ func (o *Options) approveCSR(kubeClient *kubernetes.Clientset, clusterName strin
 		approved, denied := GetCertApprovalCondition(&passedCSR.Status)
 		//if already denied, then nothing to do
 		if denied {
-			fmt.Printf("CSR %s already denied\n", passedCSR.Name)
+			fmt.Fprintf(o.Streams.Out, "CSR %s already denied\n", passedCSR.Name)
 			return true, nil
 		}
 		//if already approved, then nothing to do
 		if approved {
-			fmt.Printf("CSR %s already approved\n", passedCSR.Name)
+			fmt.Fprintf(o.Streams.Out, "CSR %s already approved\n", passedCSR.Name)
 			return true, nil
 		}
 		csr = &passedCSR
@@ -157,7 +158,7 @@ func (o *Options) approveCSR(kubeClient *kubernetes.Clientset, clusterName strin
 	//no csr found
 	if csr == nil {
 		if waitMode {
-			fmt.Printf("no CSR to approve for cluster %s\n", clusterName)
+			fmt.Fprintf(o.Streams.Out, "no CSR to approve for cluster %s\n", clusterName)
 		}
 		return false, nil
 	}
@@ -182,7 +183,7 @@ func (o *Options) approveCSR(kubeClient *kubernetes.Clientset, clusterName strin
 		return false, err
 	}
 
-	fmt.Printf("CSR %s approved\n", csr.Name)
+	fmt.Fprintf(o.Streams.Out, "CSR %s approved\n", csr.Name)
 	return true, nil
 }

@@ -197,19 +198,19 @@ func (o *Options) updateManagedCluster(clusterClient *clusterclientset.Clientset
 		return false, err
 	}
 	if mc.Spec.HubAcceptsClient {
-		fmt.Printf("hubAcceptsClient already set for managed cluster %s\n", clusterName)
+		fmt.Fprintf(o.Streams.Out, "hubAcceptsClient already set for managed cluster %s\n", clusterName)
 		return true, nil
 	}
 	if o.ClusteradmFlags.DryRun {
 		return true, nil
 	}
 	if !mc.Spec.HubAcceptsClient {
-		mc.Spec.HubAcceptsClient = true
-		_, err = clusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), mc, metav1.UpdateOptions{})
+		patch := `{"spec":{"hubAcceptsClient":true}}`
+		_, err = clusterClient.ClusterV1().ManagedClusters().Patch(context.TODO(), mc.Name, types.MergePatchType, []byte(patch), metav1.PatchOptions{})
 		if err != nil {
 			return false, err
 		}
-		fmt.Printf("set hubAcceptsClient to true for managed cluster %s\n", clusterName)
+		fmt.Fprintf(o.Streams.Out, "set hubAcceptsClient to true for managed cluster %s\n", clusterName)
 	}
 	return true, nil
 }
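Switching from Update to a JSON merge patch on spec.hubAcceptsClient avoids the read-modify-write conflict an Update can hit when the ManagedCluster changes between the Get and the write, since the patch touches only that one field.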
3 changes: 3 additions & 0 deletions pkg/cmd/accept/options.go
@@ -17,6 +17,8 @@ type Options struct {
 	SkipApproveCheck bool
 
 	Values Values
+
+	Streams genericclioptions.IOStreams
 }
 
 //Values: The values used in the template
@@ -27,5 +29,6 @@ type Values struct {
 func NewOptions(clusteradmFlags *genericclioptionsclusteradm.ClusteradmFlags, streams genericclioptions.IOStreams) *Options {
 	return &Options{
 		ClusteradmFlags: clusteradmFlags,
+		Streams:         streams,
 	}
 }
2 changes: 1 addition & 1 deletion pkg/cmd/clean/cmd.go
@@ -47,6 +47,6 @@ func NewCmd(clusteradmFlags *genericclioptionsclusteradm.ClusteradmFlags, stream
 
 	cmd.Flags().StringVar(&o.ClusterManageName, "name", "cluster-manager", "The name of the cluster manager resource")
 	cmd.Flags().StringVar(&o.OutputFile, "output-file", "", "The generated resources will be copied in the specified file")
-	cmd.Flags().BoolVar(&o.UseBootstrapToken, "use-bootstrap-token", false, "If set then the bootstrap token will used instead of a service account token")
+	cmd.Flags().BoolVar(&o.purgeOperator, "purge-operator", true, "Purge the operator")
 	return cmd
 }
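With use-bootstrap-token gone, clean now removes both the bootstrap-token and service-account bootstrap artifacts unconditionally (see removeBootStrapSecret below), while the new purge-operator flag controls whether the operator deployment, CRD, RBAC, and namespace are deleted as well.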
179 changes: 111 additions & 68 deletions pkg/cmd/clean/exec.go
@@ -8,7 +8,6 @@ import (
"log"
"time"

"github.com/stolostron/applier/pkg/apply"
clustermanagerclient "open-cluster-management.io/api/client/operator/clientset/versioned"
"open-cluster-management.io/clusteradm/pkg/helpers"

@@ -17,7 +16,9 @@
 	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/util/retry"
 	"k8s.io/klog/v2"
 )
@@ -53,95 +54,60 @@ func (o *Options) Validate() error {
 }
 
 func (o *Options) Run() error {
-	output := make([]string, 0)
-
 	//Clean ClusterManager CR resource firstly
 	f := o.ClusteradmFlags.KubectlFactory
 	config, err := f.ToRESTConfig()
 	if err != nil {
-		log.Fatal(err)
 		return err
 	}
 	clusterManagerClient, err := clustermanagerclient.NewForConfig(config)
 	if err != nil {
-		log.Fatal(err)
 		return err
 	}
 
-	if IsClusterManagerExist(clusterManagerClient) {
-		err = clusterManagerClient.OperatorV1().ClusterManagers().Delete(context.Background(), o.ClusterManageName, metav1.DeleteOptions{})
-		if err != nil {
-			log.Fatal(err)
-		}
-		if !o.ClusteradmFlags.DryRun {
-			b := retry.DefaultBackoff
-			b.Duration = 1 * time.Second
-
-			err = WaitResourceToBeDelete(context.Background(), clusterManagerClient, o.ClusterManageName, b)
-			if !errors.IsNotFound(err) {
-				log.Fatal("Cluster Manager resource should be deleted firstly.")
-			}
-		}
-	}
-	//Clean other resources
 	kubeClient, apiExtensionsClient, _, err := helpers.GetClients(f)
 	if err != nil {
 		return err
 	}
-	_ = kubeClient.AppsV1().
-		Deployments("open-cluster-management").
-		Delete(context.Background(), "cluster-manager", metav1.DeleteOptions{})
-	_ = apiExtensionsClient.ApiextensionsV1().
-		CustomResourceDefinitions().
-		Delete(context.Background(), "clustermanagers.operator.open-cluster-management.io", metav1.DeleteOptions{})
-	_ = kubeClient.RbacV1().
-		ClusterRoles().
-		Delete(context.Background(), "cluster-manager", metav1.DeleteOptions{})
-	_ = kubeClient.RbacV1().
-		ClusterRoleBindings().
-		Delete(context.Background(), "cluster-manager", metav1.DeleteOptions{})
-	_ = kubeClient.CoreV1().
-		ServiceAccounts("open-cluster-management").
-		Delete(context.Background(), "cluster-manager", metav1.DeleteOptions{})
-
-	if o.UseBootstrapToken {
-		_ = kubeClient.RbacV1().
-			ClusterRoles().
-			Delete(context.Background(), "system:open-cluster-management:bootstrap", metav1.DeleteOptions{})
-		_ = kubeClient.RbacV1().
-			ClusterRoleBindings().
-			Delete(context.Background(), "cluster-bootstrap", metav1.DeleteOptions{})
-		_ = kubeClient.CoreV1().
-			Secrets("kube-system").
-			Delete(context.Background(), "bootstrap-token-"+o.Values.Hub.TokenID, metav1.DeleteOptions{})
-	} else {
-		_ = kubeClient.RbacV1().
-			ClusterRoles().
-			Delete(context.Background(), "system:open-cluster-management:bootstrap", metav1.DeleteOptions{})
-		_ = kubeClient.RbacV1().
-			ClusterRoleBindings().
-			Delete(context.Background(), "cluster-bootstrap-sa", metav1.DeleteOptions{})
-		_ = kubeClient.CoreV1().
-			ServiceAccounts("open-cluster-management").
-			Delete(context.Background(), "cluster-bootstrap", metav1.DeleteOptions{})
-	}
-	_ = kubeClient.CoreV1().
-		Namespaces().
-		Delete(context.Background(), "open-cluster-management", metav1.DeleteOptions{})
-	fmt.Println("The multicluster hub control plane has been clean up successfully!")
+	if err := o.removeBootStrapSecret(kubeClient); err != nil {
+		return err
+	}
+
-	return apply.WriteOutput(o.OutputFile, output)
+	err = clusterManagerClient.OperatorV1().ClusterManagers().Delete(context.Background(), o.ClusterManageName, metav1.DeleteOptions{})
+	if errors.IsNotFound(err) {
+		fmt.Fprintf(o.Streams.Out, "The multicluster hub control plane is cleaned up already\n")
+		return nil
+	}
+	b := retry.DefaultBackoff
+	b.Duration = 1 * time.Second
+
+	err = WaitResourceToBeDelete(context.Background(), clusterManagerClient, o.ClusterManageName, b)
+	if err != nil {
+		return err
+	}
+
+	if o.purgeOperator {
+		if err := purgeOperator(kubeClient, apiExtensionsClient); err != nil {
+			return err
+		}
+	}
+
+	fmt.Fprintf(o.Streams.Out, "The multicluster hub control plane has been cleaned up successfully!\n")
+
+	return nil
 }
 func WaitResourceToBeDelete(context context.Context, client clustermanagerclient.Interface, name string, b wait.Backoff) error {
 
 	errGet := retry.OnError(b, func(err error) bool {
-		if err != nil && !errors.IsNotFound(err) {
-			log.Printf("Wait to delete cluster manager resource: %s.\n", name)
-			return true
-		}
-		return false
+		return true
 	}, func() error {
 		_, err := client.OperatorV1().ClusterManagers().Get(context, name, metav1.GetOptions{})
 		if errors.IsNotFound(err) {
 			return nil
 		}
 		if err == nil {
-			return fmt.Errorf("ClusterManager is still exist")
+			return fmt.Errorf("cluster manager still exists")
 		}
 		return err
 	})
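Returning true from the retriable predicate makes retry.OnError re-run the Get on every error; the closure maps NotFound to success, so the wait ends only once the ClusterManager is actually gone or the backoff is exhausted.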
@@ -158,3 +124,80 @@ func IsClusterManagerExist(cilent clustermanagerclient.Interface) bool {
 	}
 	return false
 }
+
+func (o *Options) removeBootStrapSecret(client kubernetes.Interface) error {
+	var errs []error
+	err := client.RbacV1().
+		ClusterRoles().
+		Delete(context.Background(), "system:open-cluster-management:bootstrap", metav1.DeleteOptions{})
+	if err != nil && !errors.IsNotFound(err) {
+		errs = append(errs, err)
+	}
+	err = client.RbacV1().
+		ClusterRoleBindings().
+		Delete(context.Background(), "cluster-bootstrap", metav1.DeleteOptions{})
+	if err != nil && !errors.IsNotFound(err) {
+		errs = append(errs, err)
+	}
+	err = client.CoreV1().
+		Secrets("kube-system").
+		Delete(context.Background(), "bootstrap-token-"+o.Values.Hub.TokenID, metav1.DeleteOptions{})
+	if err != nil && !errors.IsNotFound(err) {
+		errs = append(errs, err)
+	}
+	err = client.RbacV1().
+		ClusterRoleBindings().
+		Delete(context.Background(), "cluster-bootstrap-sa", metav1.DeleteOptions{})
+	if err != nil && !errors.IsNotFound(err) {
+		errs = append(errs, err)
+	}
+	err = client.CoreV1().
+		ServiceAccounts("open-cluster-management").
+		Delete(context.Background(), "cluster-bootstrap", metav1.DeleteOptions{})
+	if err != nil && !errors.IsNotFound(err) {
+		errs = append(errs, err)
+	}
+	return utilerrors.NewAggregate(errs)
+}
+
+func purgeOperator(client kubernetes.Interface, extensionClient apiextensionsclient.Interface) error {
+	var errs []error
+	err := client.AppsV1().
+		Deployments("open-cluster-management").
+		Delete(context.Background(), "cluster-manager", metav1.DeleteOptions{})
+	if err != nil && !errors.IsNotFound(err) {
+		errs = append(errs, err)
+	}
+	err = extensionClient.ApiextensionsV1().
+		CustomResourceDefinitions().
+		Delete(context.Background(), "clustermanagers.operator.open-cluster-management.io", metav1.DeleteOptions{})
+	if err != nil && !errors.IsNotFound(err) {
+		errs = append(errs, err)
+	}
+	err = client.RbacV1().
+		ClusterRoles().
+		Delete(context.Background(), "cluster-manager", metav1.DeleteOptions{})
+	if err != nil && !errors.IsNotFound(err) {
+		errs = append(errs, err)
+	}
+	err = client.RbacV1().
+		ClusterRoleBindings().
+		Delete(context.Background(), "cluster-manager", metav1.DeleteOptions{})
+	if err != nil && !errors.IsNotFound(err) {
+		errs = append(errs, err)
+	}
+	err = client.CoreV1().
+		ServiceAccounts("open-cluster-management").
+		Delete(context.Background(), "cluster-manager", metav1.DeleteOptions{})
+	if err != nil && !errors.IsNotFound(err) {
+		errs = append(errs, err)
+	}
+	err = client.CoreV1().
+		Namespaces().
+		Delete(context.Background(), "open-cluster-management", metav1.DeleteOptions{})
+	if err != nil && !errors.IsNotFound(err) {
+		errs = append(errs, err)
+	}
+
+	return utilerrors.NewAggregate(errs)
+}
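
Both helpers fold per-resource failures together with utilerrors.NewAggregate instead of aborting on the first error, so one failed delete does not hide the rest. A minimal standalone sketch of that pattern (the error strings are made up for illustration):

package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	var errs []error
	// Collect errors from independent cleanup steps instead of returning early.
	errs = append(errs, errors.New("delete deployment: forbidden"))
	errs = append(errs, errors.New("delete crd: timeout"))

	// NewAggregate flattens the slice into one error and returns nil for an empty slice.
	if agg := utilerrors.NewAggregate(errs); agg != nil {
		fmt.Println(agg)
	}
}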