From 204fbf77c97e7ca389ba401741438477a2cc328b Mon Sep 17 00:00:00 2001
From: wangyizhi1
Date: Fri, 27 Oct 2023 11:09:05 +0800
Subject: [PATCH] style: fix style

Signed-off-by: wangyizhi1
---
 cmd/clustertree/cluster-manager/app/manager.go |  6 +++---
 .../cluster-manager/cluster_controller.go      |  4 ++--
 .../controllers/node_resources_controller.go   | 16 ++++------------
 3 files changed, 9 insertions(+), 17 deletions(-)

diff --git a/cmd/clustertree/cluster-manager/app/manager.go b/cmd/clustertree/cluster-manager/app/manager.go
index 6a44f65a7..3e9cd5b90 100644
--- a/cmd/clustertree/cluster-manager/app/manager.go
+++ b/cmd/clustertree/cluster-manager/app/manager.go
@@ -69,12 +69,12 @@ func run(ctx context.Context, opts *options.Options) error {
 	}
 
 	// init Kosmos client
-	kosmosRootClient, err := utils.NewKosmosClientFromConfigPath(opts.KubernetesOptions.KubeConfig, configOptFunc)
+	rootKosmosClient, err := utils.NewKosmosClientFromConfigPath(opts.KubernetesOptions.KubeConfig, configOptFunc)
 	if err != nil {
 		return fmt.Errorf("could not build kosmos clientset for root cluster: %v", err)
 	}
 
-	rootResourceManager := utils.NewResourceManager(rootClient, kosmosRootClient)
+	rootResourceManager := utils.NewResourceManager(rootClient, rootKosmosClient)
 	mgr, err := controllerruntime.NewManager(config, controllerruntime.Options{
 		Logger: klog.Background(),
 		Scheme: scheme.NewSchema(),
@@ -135,7 +135,7 @@ func run(ctx context.Context, opts *options.Options) error {
 	rootResourceManager.InformerFactory.Start(ctx.Done())
 	rootResourceManager.KosmosInformerFactory.Start(ctx.Done())
 	if !cache.WaitForCacheSync(ctx.Done(), rootResourceManager.EndpointSliceInformer.HasSynced) {
-		klog.Fatal("Knode manager: wait for informer factory failed")
+		klog.Fatal("cluster manager: wait for informer factory failed")
 	}
 
 	<-ctx.Done()
diff --git a/pkg/clustertree/cluster-manager/cluster_controller.go b/pkg/clustertree/cluster-manager/cluster_controller.go
index 536c7a090..ddf71e980 100644
--- a/pkg/clustertree/cluster-manager/cluster_controller.go
+++ b/pkg/clustertree/cluster-manager/cluster_controller.go
@@ -45,7 +45,7 @@ const (
 	RootClusterAnnotationValue = "root"
 
 	DefaultLeafKubeQPS = 40.0
-	DefalutLeafKubeBurst = 60
+	DefaultLeafKubeBurst = 60
 )
 
 type ClusterController struct {
@@ -134,7 +134,7 @@ func (c *ClusterController) Reconcile(ctx context.Context, request reconcile.Req
 	config, err := utils.NewConfigFromBytes(cluster.Spec.Kubeconfig, func(config *rest.Config) {
 		config.QPS = DefaultLeafKubeQPS
-		config.Burst = DefalutLeafKubeBurst
+		config.Burst = DefaultLeafKubeBurst
 	})
 	if err != nil {
 		return reconcile.Result{}, fmt.Errorf("could not build kubeconfig for cluster %s: %v", cluster.Name, err)
 	}
diff --git a/pkg/clustertree/cluster-manager/controllers/node_resources_controller.go b/pkg/clustertree/cluster-manager/controllers/node_resources_controller.go
index 0ae916c1c..cf9c14e5c 100644
--- a/pkg/clustertree/cluster-manager/controllers/node_resources_controller.go
+++ b/pkg/clustertree/cluster-manager/controllers/node_resources_controller.go
@@ -93,11 +93,6 @@ func (c *NodeResourcesController) Reconcile(ctx context.Context, request reconci
 		klog.V(4).Infof("============ %s has been reconciled =============", request.Name)
 	}()
-	node := &corev1.Node{}
-	if err := c.Leaf.Get(ctx, request.NamespacedName, node); err != nil {
-		return controllerruntime.Result{}, err
-	}
-
 	nodes := corev1.NodeList{}
 	if err := c.Leaf.List(ctx, &nodes); err != nil {
 		return controllerruntime.Result{}, err
 	}
@@ -110,23 +105,20 @@ func (c *NodeResourcesController) Reconcile(ctx context.Context, request reconci
 
 	clusterResources := utils.CalculateClusterResources(&nodes, &pods)
 
-	curr := &corev1.Node{}
-	namespacedName := types.NamespacedName{
-		Name: c.Node.Name,
-	}
-	err := c.Root.Get(ctx, namespacedName, curr)
+	node := &corev1.Node{}
+	err := c.Root.Get(ctx, types.NamespacedName{Name: c.Node.Name}, node)
 	if err != nil {
 		return reconcile.Result{
 			RequeueAfter: RequeueTime,
 		}, fmt.Errorf("cannot get node while update node resources %s, err: %v", c.Node.Name, err)
 	}
 
-	clone := curr.DeepCopy()
+	clone := node.DeepCopy()
 	clone.Status.Allocatable = clusterResources
 	clone.Status.Capacity = clusterResources
 	clone.Status.Conditions = utils.NodeConditions()
 
-	patch, err := utils.CreateMergePatch(curr, clone)
+	patch, err := utils.CreateMergePatch(node, clone)
 	if err != nil {
 		return reconcile.Result{}, err
 	}
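
Context for the cluster_controller.go hunks: the renamed DefaultLeafKubeBurst constant, together with DefaultLeafKubeQPS, feeds client-go's client-side rate limiter through rest.Config. The sketch below shows that pattern in isolation; buildLeafConfig is a hypothetical stand-in for utils.NewConfigFromBytes, which is assumed to parse the kubeconfig bytes and apply the option functions.

package main

import (
	"fmt"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

const (
	DefaultLeafKubeQPS   = 40.0
	DefaultLeafKubeBurst = 60
)

// buildLeafConfig is a hypothetical stand-in for utils.NewConfigFromBytes:
// it parses a kubeconfig from raw bytes and lets callers tune the client-side
// rate limits before the REST config is handed to client-go.
func buildLeafConfig(kubeconfig []byte, opts ...func(*rest.Config)) (*rest.Config, error) {
	config, err := clientcmd.RESTConfigFromKubeConfig(kubeconfig)
	if err != nil {
		return nil, err
	}
	for _, opt := range opts {
		opt(config)
	}
	return config, nil
}

func main() {
	kubeconfig := []byte("...") // placeholder; the controller reads this from cluster.Spec.Kubeconfig
	config, err := buildLeafConfig(kubeconfig, func(c *rest.Config) {
		c.QPS = DefaultLeafKubeQPS
		c.Burst = DefaultLeafKubeBurst
	})
	if err != nil {
		fmt.Println("could not build kubeconfig for leaf cluster:", err)
		return
	}
	fmt.Printf("leaf client rate limits: QPS=%v Burst=%d\n", config.QPS, config.Burst)
}

Raising QPS and Burst above client-go's defaults avoids client-side throttling when the controller syncs many leaf-cluster resources in a short window.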
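
Context for the node_resources_controller.go hunks: the reconciler deep-copies the root node, overwrites its status with the aggregated leaf resources, and diffs the two objects into a merge patch. A minimal, self-contained sketch of that read-modify-patch pattern follows; it assumes utils.CreateMergePatch produces an RFC 7386 JSON merge patch (for example via evanphx/json-patch) and uses a hypothetical node name, neither of which is shown in this diff.

package main

import (
	"encoding/json"
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// createMergePatch mirrors what a helper like utils.CreateMergePatch is assumed
// to do: marshal both objects and diff them into an RFC 7386 JSON merge patch.
func createMergePatch(original, modified interface{}) ([]byte, error) {
	origJSON, err := json.Marshal(original)
	if err != nil {
		return nil, err
	}
	modJSON, err := json.Marshal(modified)
	if err != nil {
		return nil, err
	}
	return jsonpatch.CreateMergePatch(origJSON, modJSON)
}

func main() {
	// Stand-in for the root node fetched with c.Root.Get(...).
	node := &corev1.Node{}
	node.Name = "kosmos-root-node" // hypothetical name
	node.Status.Allocatable = corev1.ResourceList{
		corev1.ResourceCPU: resource.MustParse("4"),
	}

	// Deep-copy, then overwrite status with the aggregated leaf resources,
	// as the reconciler does with the result of CalculateClusterResources.
	clone := node.DeepCopy()
	clone.Status.Allocatable = corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("32"),
		corev1.ResourceMemory: resource.MustParse("64Gi"),
	}
	clone.Status.Capacity = clone.Status.Allocatable

	patch, err := createMergePatch(node, clone)
	if err != nil {
		panic(err)
	}
	// The patch carries only the changed status fields; the controller would
	// then apply it to the root node, e.g. as a merge-type status patch.
	fmt.Println(string(patch))
}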