From dd1cb05939479b2b26b95c560f260dc470aeeb81 Mon Sep 17 00:00:00 2001 From: Jian Qiu Date: Mon, 11 Sep 2023 09:46:41 +0800 Subject: [PATCH] Refactor singleton mode to use image built in ocm Signed-off-by: Jian Qiu --- go.mod | 2 +- go.sum | 4 +- .../scenario/init/clustermanagers.crd.yaml | 6 +- pkg/cmd/join/exec.go | 179 ++----- pkg/cmd/join/options.go | 2 - .../join/scenario/join/klusterlets.cr.yaml | 3 +- .../join/scenario/join/klusterlets.crd.yaml | 36 +- pkg/cmd/join/scenario/join/operator.yaml | 2 +- vendor/modules.txt | 2 +- ...anagement.io_managedclusteraddons.crd.yaml | 9 +- ...agement.io_addondeploymentconfigs.crd.yaml | 22 + ...ster-management.io_addontemplates.crd.yaml | 471 ++++++++++++++++++ .../api/addon/v1alpha1/register.go | 2 + .../v1alpha1/types_addondeploymentconfig.go | 28 ++ .../api/addon/v1alpha1/types_addontemplate.go | 186 +++++++ .../v1alpha1/types_managedclusteraddon.go | 7 +- .../addon/v1alpha1/zz_generated.deepcopy.go | 226 +++++++++ .../zz_generated.swagger_doc_generated.go | 102 +++- .../typed/addon/v1alpha1/addon_client.go | 5 + .../typed/addon/v1alpha1/addontemplate.go | 152 ++++++ .../addon/v1alpha1/generated_expansion.go | 2 + .../typed/cluster/v1beta1/cluster_client.go | 10 - .../cluster/v1beta1/generated_expansion.go | 4 - .../cluster/v1beta1/managedclusterset.go | 168 ------- .../v1beta1/managedclustersetbinding.go | 179 ------- .../api/cluster/v1/types.go | 23 + .../api/cluster/v1alpha1/helpers.go | 382 ++++++++++++++ .../cluster/v1alpha1/types_rolloutstrategy.go | 103 ++++ .../cluster/v1alpha1/zz_generated.deepcopy.go | 193 +++++++ .../zz_generated.swagger_doc_generated.go | 65 +++ ...-management.io_managedclustersets.crd.yaml | 207 -------- ...ment.io_managedclustersetbindings.crd.yaml | 136 ----- ...-cluster-management.io_placements.crd.yaml | 186 +++++++ .../api/cluster/v1beta1/helpers.go | 311 +++++++----- .../api/cluster/v1beta1/register.go | 4 - .../v1beta1/types_managedclusterset.go | 99 ---- .../v1beta1/types_managedclustersetbinding.go | 65 --- .../api/cluster/v1beta1/types_placement.go | 79 +++ .../v1beta1/types_placementdecision.go | 7 +- .../cluster/v1beta1/zz_generated.deepcopy.go | 236 +++------ .../zz_generated.swagger_doc_generated.go | 129 ++--- ...-management.io_managedclustersets.crd.yaml | 189 +------ ...ment.io_managedclustersetbindings.crd.yaml | 107 +--- .../api/feature/feature.go | 4 +- ...cluster-management.io_klusterlets.crd.yaml | 36 +- ...ter-management.io_clustermanagers.crd.yaml | 6 +- .../v1/{types.go => types_clustermanager.go} | 219 +------- .../api/operator/v1/types_klusterlet.go | 197 ++++++++ .../api/operator/v1/zz_generated.deepcopy.go | 7 + .../v1/zz_generated.swagger_doc_generated.go | 112 +++-- .../api/work/v1/types.go | 22 +- ...gement.io_manifestworkreplicasets.crd.yaml | 176 ++++++- .../v1alpha1/types_manifestworkreplicaset.go | 34 +- .../work/v1alpha1/zz_generated.deepcopy.go | 27 +- .../zz_generated.swagger_doc_generated.go | 18 +- 55 files changed, 3209 insertions(+), 1979 deletions(-) create mode 100644 vendor/open-cluster-management.io/api/addon/v1alpha1/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml create mode 100644 vendor/open-cluster-management.io/api/addon/v1alpha1/types_addontemplate.go create mode 100644 vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/addontemplate.go delete mode 100644 vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta1/managedclusterset.go delete mode 100644 
vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta1/managedclustersetbinding.go create mode 100644 vendor/open-cluster-management.io/api/cluster/v1alpha1/helpers.go create mode 100644 vendor/open-cluster-management.io/api/cluster/v1alpha1/types_rolloutstrategy.go delete mode 100644 vendor/open-cluster-management.io/api/cluster/v1beta1/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml delete mode 100644 vendor/open-cluster-management.io/api/cluster/v1beta1/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml delete mode 100644 vendor/open-cluster-management.io/api/cluster/v1beta1/types_managedclusterset.go delete mode 100644 vendor/open-cluster-management.io/api/cluster/v1beta1/types_managedclustersetbinding.go rename vendor/open-cluster-management.io/api/operator/v1/{types.go => types_clustermanager.go} (59%) create mode 100644 vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go diff --git a/go.mod b/go.mod index 126f6942c..75b9486ca 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( k8s.io/klog/v2 v2.90.1 k8s.io/kubectl v0.27.1 k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 - open-cluster-management.io/api v0.11.0 + open-cluster-management.io/api v0.11.1-0.20230905055724-cf1ead467a83 open-cluster-management.io/cluster-proxy v0.1.2 open-cluster-management.io/managed-serviceaccount v0.2.1-0.20220427065210-de6a7b7b5be8 sigs.k8s.io/apiserver-network-proxy v0.1.2 diff --git a/go.sum b/go.sum index a007c0dc8..8b5e181e4 100644 --- a/go.sum +++ b/go.sum @@ -1601,8 +1601,8 @@ modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= open-cluster-management.io/addon-framework v0.1.1-0.20211223101009-d6b1a7adae93/go.mod h1:reAEgSFo9UX+TzfGjhHMWKmrOC4hJYxBHeubqllDbhk= open-cluster-management.io/api v0.5.0/go.mod h1:9qiA5h/8kvPQnJEOlAPHVjRO9a1jCmDhGzvgMBvXEaE= -open-cluster-management.io/api v0.11.0 h1:zBxa33Co3wseLBF4HEJobhl0P6ygj+Drhe7Wrfo0/h8= -open-cluster-management.io/api v0.11.0/go.mod h1:WgKUCJ7+Bf40DsOmH1Gdkpyj3joco+QLzrlM6Ak39zE= +open-cluster-management.io/api v0.11.1-0.20230905055724-cf1ead467a83 h1:3zbT3sT/tEAQbpjIk6uRiTQGknQ3kQlfd11ElVuXyyQ= +open-cluster-management.io/api v0.11.1-0.20230905055724-cf1ead467a83/go.mod h1:nsQ/G5JpfjQUg7dHpblyywWC6BRqklNaF6fIswVCHyY= open-cluster-management.io/cluster-proxy v0.1.2 h1:0WYhPEZT6Wlt0dyE35GCC7zcqPo2poaALzdmVbQp9Dg= open-cluster-management.io/cluster-proxy v0.1.2/go.mod h1:f4HVfparQqOJsPrh4fTA4uyWx5KJ7MG2AcuSRypedws= open-cluster-management.io/managed-serviceaccount v0.2.1-0.20220427065210-de6a7b7b5be8 h1:foP5RkSec9DkUA8sJ5FLna5We8VxJyu0179id5zNXgA= diff --git a/pkg/cmd/init/scenario/init/clustermanagers.crd.yaml b/pkg/cmd/init/scenario/init/clustermanagers.crd.yaml index 9cd78cf81..b517451c6 100644 --- a/pkg/cmd/init/scenario/init/clustermanagers.crd.yaml +++ b/pkg/cmd/init/scenario/init/clustermanagers.crd.yaml @@ -89,7 +89,7 @@ spec: Default mode is used if DeployOption is not set. properties: hosted: - description: Hosted includes configurations we needs for clustermanager + description: Hosted includes configurations we need for clustermanager in the Hosted mode. properties: registrationWebhookConfiguration: @@ -161,8 +161,8 @@ spec: on. The default is an empty list. 
type: object tolerations: - description: Tolerations is attached by pods to tolerate any taint - that matches the triple using the matching + description: Tolerations are attached by pods to tolerate any + taint that matches the triple using the matching operator . The default is an empty list. items: description: The pod this Toleration is attached to tolerates diff --git a/pkg/cmd/join/exec.go b/pkg/cmd/join/exec.go index b14375d2f..b04974502 100644 --- a/pkg/cmd/join/exec.go +++ b/pkg/cmd/join/exec.go @@ -24,6 +24,7 @@ import ( "k8s.io/klog/v2" "k8s.io/kubectl/pkg/cmd/util" ocmfeature "open-cluster-management.io/api/feature" + operatorv1 "open-cluster-management.io/api/operator/v1" "open-cluster-management.io/clusteradm/pkg/cmd/join/preflight" "open-cluster-management.io/clusteradm/pkg/cmd/join/scenario" genericclioptionsclusteradm "open-cluster-management.io/clusteradm/pkg/genericclioptions" @@ -88,42 +89,45 @@ func (o *Options) complete(cmd *cobra.Command, args []string) (err error) { Registry: o.registry, AgentNamespace: agentNamespace, } + // deploy klusterlet + // operatorNamespace is the namespace to deploy klsuterlet; + // agentNamespace is the namesapce to deploy the agents(registration agent, work agent, etc.); + // klusterletNamespace is the namespace created on the managed cluster for each klusterlet. + // + // The operatorNamespace is fixed to "open-cluster-management". + // In default mode, agentNamespace is "open-cluster-management-agent", klusterletNamespace refers to agentNamespace, all of these three namesapces are on the managed cluster; + // In hosted mode, operatorNamespace is on the management cluster, agentNamesapce is "-<6-bit random string>" on the management cluster, and the klusterletNamespace is "open-cluster-management-" on the managed cluster. + + // values for default mode + klusterletName := DefaultOperatorName + klusterletNamespace := agentNamespace + if o.mode == InstallModeHosted { + // add hash suffix to avoid conflict + klusterletName += "-hosted-" + helpers.RandStringRunes_az09(6) + agentNamespace = klusterletName + klusterletNamespace = AgentNamespacePrefix + agentNamespace - if o.singleton { // deploy singleton agent - if o.mode != InstallModeDefault { - return fmt.Errorf("only default mode is supported while deploy singleton agent, hosted mode will be supported in the future") - } - } else { // deploy klusterlet - // operatorNamespace is the namespace to deploy klsuterlet; - // agentNamespace is the namesapce to deploy the agents(registration agent, work agent, etc.); - // klusterletNamespace is the namespace created on the managed cluster for each klusterlet. - // - // The operatorNamespace is fixed to "open-cluster-management". - // In default mode, agentNamespace is "open-cluster-management-agent", klusterletNamespace refers to agentNamespace, all of these three namesapces are on the managed cluster; - // In hosted mode, operatorNamespace is on the management cluster, agentNamesapce is "-<6-bit random string>" on the management cluster, and the klusterletNamespace is "open-cluster-management-" on the managed cluster. 
- - // values for default mode - klusterletName := DefaultOperatorName - klusterletNamespace := agentNamespace - if o.mode == InstallModeHosted { - // add hash suffix to avoid conflict - klusterletName += "-hosted-" + helpers.RandStringRunes_az09(6) - agentNamespace = klusterletName - klusterletNamespace = AgentNamespacePrefix + agentNamespace + // update AgentNamespace + o.values.AgentNamespace = agentNamespace + } - // update AgentNamespace - o.values.AgentNamespace = agentNamespace - } + o.values.Klusterlet = Klusterlet{ + Name: klusterletName, + KlusterletNamespace: klusterletNamespace, + } + o.values.ManagedKubeconfig = o.managedKubeconfigFile + o.values.RegistrationFeatures = genericclioptionsclusteradm.ConvertToFeatureGateAPI(genericclioptionsclusteradm.SpokeMutableFeatureGate, ocmfeature.DefaultSpokeRegistrationFeatureGates) + o.values.WorkFeatures = genericclioptionsclusteradm.ConvertToFeatureGateAPI(genericclioptionsclusteradm.SpokeMutableFeatureGate, ocmfeature.DefaultSpokeWorkFeatureGates) - o.values.Klusterlet = Klusterlet{ - Mode: o.mode, - Name: klusterletName, - KlusterletNamespace: klusterletNamespace, - } - o.values.ManagedKubeconfig = o.managedKubeconfigFile - o.values.RegistrationFeatures = genericclioptionsclusteradm.ConvertToFeatureGateAPI(genericclioptionsclusteradm.SpokeMutableFeatureGate, ocmfeature.DefaultSpokeRegistrationFeatureGates) - o.values.WorkFeatures = genericclioptionsclusteradm.ConvertToFeatureGateAPI(genericclioptionsclusteradm.SpokeMutableFeatureGate, ocmfeature.DefaultSpokeWorkFeatureGates) + // set mode based on mode and singleton + if o.mode == InstallModeHosted && o.singleton { + o.values.Klusterlet.Mode = string(operatorv1.InstallModeSingletonHosted) + } else if o.singleton { + o.values.Klusterlet.Mode = string(operatorv1.InstallModeSingleton) + } else { + o.values.Klusterlet.Mode = o.mode } + versionBundle, err := version.GetVersionBundle(o.bundleVersion) if err != nil { @@ -132,11 +136,10 @@ func (o *Options) complete(cmd *cobra.Command, args []string) (err error) { } o.values.BundleVersion = BundleVersion{ - RegistrationImageVersion: versionBundle.Registration, - PlacementImageVersion: versionBundle.Placement, - WorkImageVersion: versionBundle.Work, - OperatorImageVersion: versionBundle.Operator, - SingletonAgentImageVersion: versionBundle.MulticlusterControlplane, + RegistrationImageVersion: versionBundle.Registration, + PlacementImageVersion: versionBundle.Placement, + WorkImageVersion: versionBundle.Work, + OperatorImageVersion: versionBundle.Operator, } klog.V(3).InfoS("Image version:", "'registration image version'", versionBundle.Registration, @@ -243,35 +246,9 @@ func (o *Options) run() error { r := reader.NewResourceReader(o.builder, o.ClusteradmFlags.DryRun, o.Streams) - _, err = kubeClient.CoreV1().Namespaces().Get(context.TODO(), o.values.AgentNamespace, metav1.GetOptions{}) + err = o.applyKlusterlet(r, kubeClient, apiExtensionsClient) if err != nil { - if errors.IsNotFound(err) { - _, err = kubeClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: o.values.AgentNamespace, - Annotations: map[string]string{ - "workload.openshift.io/allowed": "management", - }, - }, - }, metav1.CreateOptions{}) - if err != nil { - return err - } - } else { - return err - } - } - - if o.singleton { - err = o.applySingletonAgent(r, kubeClient) - if err != nil { - return err - } - } else { - err = o.applyKlusterlet(r, kubeClient, apiExtensionsClient) - if err != nil { - return err - } + return err 
} if len(o.outputFile) > 0 { @@ -294,32 +271,6 @@ func (o *Options) run() error { } -func (o *Options) applySingletonAgent(r *reader.ResourceReader, kubeClient kubernetes.Interface) error { - files := []string{ - "bootstrap_hub_kubeconfig.yaml", - "singleton/clusterrole.yaml", - "singleton/clusterrolebinding-admin.yaml", - "singleton/clusterrolebinding.yaml", - "singleton/role.yaml", - "singleton/rolebinding.yaml", - "singleton/serviceaccount.yaml", - "singleton/deployment.yaml", - } - - err := r.Apply(scenario.Files, o.values, files...) - if err != nil { - return err - } - - if o.wait && !o.ClusteradmFlags.DryRun { - err = waitUntilSingletonAgentConditionIsTrue(o.ClusteradmFlags.KubectlFactory, int64(o.ClusteradmFlags.Timeout), o.values.AgentNamespace) - if err != nil { - return err - } - } - return nil -} - func (o *Options) applyKlusterlet(r *reader.ResourceReader, kubeClient kubernetes.Interface, apiExtensionsClient apiextensionsclient.Interface) error { available, err := checkIfRegistrationOperatorAvailable(o.ClusteradmFlags.KubectlFactory) if err != nil { @@ -525,52 +476,6 @@ func waitUntilKlusterletConditionIsTrue(f util.Factory, timeout int64, agentName ) } -func waitUntilSingletonAgentConditionIsTrue(f util.Factory, timeout int64, agentNamespace string) error { - client, err := f.KubernetesClientSet() - if err != nil { - return err - } - - phase := &atomic.Value{} - phase.Store("") - agentSpinner := printer.NewSpinnerWithStatus( - "Waiting for controlplane agent to become ready...", - time.Millisecond*500, - "Controlplane agent is now available.\n", - func() string { - return phase.Load().(string) - }) - agentSpinner.Start() - defer agentSpinner.Stop() - - return helpers.WatchUntil( - func() (watch.Interface, error) { - return client.CoreV1().Pods(agentNamespace). 
- Watch(context.TODO(), metav1.ListOptions{ - TimeoutSeconds: &timeout, - LabelSelector: "app=multicluster-controlplane-agent", - }) - }, - func(event watch.Event) bool { - pod, ok := event.Object.(*corev1.Pod) - if !ok { - return false - } - phase.Store(printer.GetSpinnerPodStatus(pod)) - conds := make([]metav1.Condition, len(pod.Status.Conditions)) - for i := range pod.Status.Conditions { - conds[i] = metav1.Condition{ - Type: string(pod.Status.Conditions[i].Type), - Status: metav1.ConditionStatus(pod.Status.Conditions[i].Status), - Reason: pod.Status.Conditions[i].Reason, - Message: pod.Status.Conditions[i].Message, - } - } - return meta.IsStatusConditionTrue(conds, "Ready") - }, - ) -} - // Create bootstrap with token but without CA func (o *Options) createExternalBootstrapConfig() clientcmdapiv1.Config { return clientcmdapiv1.Config{ diff --git a/pkg/cmd/join/options.go b/pkg/cmd/join/options.go index 3f698175e..c342629c1 100644 --- a/pkg/cmd/join/options.go +++ b/pkg/cmd/join/options.go @@ -110,8 +110,6 @@ type BundleVersion struct { WorkImageVersion string // operator image version OperatorImageVersion string - // singleton agent image version - SingletonAgentImageVersion string } func newOptions(clusteradmFlags *genericclioptionsclusteradm.ClusteradmFlags, streams genericclioptions.IOStreams) *Options { diff --git a/pkg/cmd/join/scenario/join/klusterlets.cr.yaml b/pkg/cmd/join/scenario/join/klusterlets.cr.yaml index aa9dddef8..e0686c409 100644 --- a/pkg/cmd/join/scenario/join/klusterlets.cr.yaml +++ b/pkg/cmd/join/scenario/join/klusterlets.cr.yaml @@ -7,7 +7,8 @@ spec: deployOption: mode: {{ .Klusterlet.Mode }} registrationImagePullSpec: {{ .Registry }}/registration:{{ .BundleVersion.RegistrationImageVersion }} - workImagePullSpec: {{ .Registry }}/work:{{ .BundleVersion.RegistrationImageVersion }} + workImagePullSpec: {{ .Registry }}/work:{{ .BundleVersion.RegistrationImageVersion }} + imagePullSpec: {{.Registry}}/registration-operator:{{.BundleVersion.OperatorImageVersion}} clusterName: {{ .ClusterName }} namespace: {{ .Klusterlet.KlusterletNamespace }} externalServerURLs: diff --git a/pkg/cmd/join/scenario/join/klusterlets.crd.yaml b/pkg/cmd/join/scenario/join/klusterlets.crd.yaml index e25c446fb..da6d11a47 100644 --- a/pkg/cmd/join/scenario/join/klusterlets.crd.yaml +++ b/pkg/cmd/join/scenario/join/klusterlets.crd.yaml @@ -50,20 +50,22 @@ spec: description: DeployOption contains the options of deploying a klusterlet properties: mode: - description: 'Mode can be Default or Hosted. It is Default mode - if not specified In Default mode, all klusterlet related resources - are deployed on the managed cluster. In Hosted mode, only crd - and configurations are installed on the spoke/managed cluster. - Controllers run in another cluster (defined as management-cluster) + description: 'Mode can be Default, Hosted, Singleton or SingletonHosted. + It is Default mode if not specified In Default mode, all klusterlet + related resources are deployed on the managed cluster. In Hosted + mode, only crd and configurations are installed on the spoke/managed + cluster. Controllers run in another cluster (defined as management-cluster) and connect to the mangaged cluster with the kubeconfig in secret of "external-managed-kubeconfig"(a kubeconfig of managed-cluster - with cluster-admin permission). Note: Do not modify the Mode - field once it''s applied.' + with cluster-admin permission). In Singleton mode, registration/work + agent is started as a single deployment. 
In SingletonHosted + mode, agent is started as a single deployment in hosted mode. + Note: Do not modify the Mode field once it''s applied.' type: string type: object externalServerURLs: - description: ExternalServerURLs represents the a list of apiserver - urls and ca bundles that is accessible externally If it is set empty, + description: ExternalServerURLs represents a list of apiserver urls + and ca bundles that is accessible externally If it is set empty, managed cluster has no externally accessible url that hub cluster can visit. items: @@ -99,6 +101,11 @@ spec: - hostname - ip type: object + imagePullSpec: + description: ImagePullSpec represents the desired image configuration + of agent, it takes effect only when singleton mode is set. quay.io/open-cluster-management.io/registration-operator:latest + will be used if unspecified + type: string namespace: description: Namespace is the namespace to deploy the agent on the managed cluster. The namespace must have a prefix of "open-cluster-management-", @@ -123,8 +130,8 @@ spec: on. The default is an empty list. type: object tolerations: - description: Tolerations is attached by pods to tolerate any taint - that matches the triple using the matching + description: Tolerations are attached by pods to tolerate any + taint that matches the triple using the matching operator . The default is an empty list. items: description: The pod this Toleration is attached to tolerates @@ -179,6 +186,13 @@ spec: set. format: int32 type: integer + clusterAnnotations: + additionalProperties: + type: string + description: ClusterAnnotations is annotations with the reserve + prefix "agent.open-cluster-management.io" set on ManagedCluster + when creating only, other actors can update it afterwards. + type: object featureGates: description: 'FeatureGates represents the list of feature gates for registration If it is set empty, default feature gates will diff --git a/pkg/cmd/join/scenario/join/operator.yaml b/pkg/cmd/join/scenario/join/operator.yaml index 1a055be67..9f87fa5b9 100644 --- a/pkg/cmd/join/scenario/join/operator.yaml +++ b/pkg/cmd/join/scenario/join/operator.yaml @@ -42,7 +42,7 @@ spec: serviceAccountName: klusterlet containers: - name: klusterlet - image: {{ .Registry }}/registration-operator:{{ .BundleVersion.RegistrationImageVersion }} + image: {{ .Registry }}/registration-operator:{{ .BundleVersion.OperatorImageVersion }} args: - "/registration-operator" - "klusterlet" diff --git a/vendor/modules.txt b/vendor/modules.txt index 00fd7d7cf..d3f44eedc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1198,7 +1198,7 @@ k8s.io/utils/net k8s.io/utils/pointer k8s.io/utils/strings/slices k8s.io/utils/trace -# open-cluster-management.io/api v0.11.0 +# open-cluster-management.io/api v0.11.1-0.20230905055724-cf1ead467a83 ## explicit; go 1.19 open-cluster-management.io/api/addon/v1alpha1 open-cluster-management.io/api/client/addon/clientset/versioned diff --git a/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_01_addon.open-cluster-management.io_managedclusteraddons.crd.yaml b/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_01_addon.open-cluster-management.io_managedclusteraddons.crd.yaml index 8d2851235..cc9a3d052 100644 --- a/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_01_addon.open-cluster-management.io_managedclusteraddons.crd.yaml +++ b/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_01_addon.open-cluster-management.io_managedclusteraddons.crd.yaml @@ -51,7 +51,7 @@ spec: configs: description: 
configs is a list of add-on configurations. In scenario where the current add-on has its own configurations. An empty list - means there are no defautl configurations for add-on. The default + means there are no default configurations for add-on. The default is an empty list items: properties: @@ -318,9 +318,10 @@ spec: subject: description: 'subject is the user subject of the addon agent to be registered to the hub. If it is not set, the addon agent - will have the default subject "subject": { "user": "system:open-cluster-management:addon:{addonName}:{clusterName}:{agentName}", - "groups: ["system:open-cluster-management:addon", "system:open-cluster-management:addon:{addonName}", - "system:authenticated"] }' + will have the default subject "subject": { "user": "system:open-cluster-management:cluster:{clusterName}:addon:{addonName}:agent:{agentName}", + "groups: ["system:open-cluster-management:cluster:{clusterName}:addon:{addonName}", + "system:open-cluster-management:addon:{addonName}", "system:authenticated"] + }' properties: groups: description: groups is the user group of the addon agent. diff --git a/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_02_addon.open-cluster-management.io_addondeploymentconfigs.crd.yaml b/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_02_addon.open-cluster-management.io_addondeploymentconfigs.crd.yaml index 5396c46c3..69709fbd5 100644 --- a/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_02_addon.open-cluster-management.io_addondeploymentconfigs.crd.yaml +++ b/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_02_addon.open-cluster-management.io_addondeploymentconfigs.crd.yaml @@ -33,6 +33,13 @@ spec: spec: description: spec represents a desired configuration for an add-on. properties: + agentInstallNamespace: + default: open-cluster-management-agent-addon + description: AgentInstallNamespace is the namespace where the add-on + agent should be installed on the managed cluster. + maxLength: 63 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string customizedVariables: description: CustomizedVariables is a list of name-value variables for the current add-on deployment. The add-on implementation can @@ -118,6 +125,21 @@ spec: type: object type: array type: object + proxyConfig: + description: ProxyConfig holds proxy settings for add-on agent on + the managed cluster. Empty means no proxy settings is available. + properties: + httpProxy: + description: HTTPProxy is the URL of the proxy for HTTP requests + type: string + httpsProxy: + description: HTTPSProxy is the URL of the proxy for HTTPS requests + type: string + noProxy: + description: NoProxy is a comma-separated list of hostnames and/or + CIDRs and/or IPs for which the proxy should not be used. + type: string + type: object registries: description: "Registries describes how to override images used by the addon agent on the managed cluster. 
the following example will diff --git a/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml b/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml new file mode 100644 index 000000000..ade1103ff --- /dev/null +++ b/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml @@ -0,0 +1,471 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: addontemplates.addon.open-cluster-management.io +spec: + group: addon.open-cluster-management.io + names: + kind: AddOnTemplate + listKind: AddOnTemplateList + plural: addontemplates + singular: addontemplate + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.addonName + name: ADDON NAME + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: "AddOnTemplate is the Custom Resource object, it is used to describe + how to deploy the addon agent and how to register the addon. \n AddOnTemplate + is a cluster-scoped resource, and will only be used on the hub cluster." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds the registration configuration for the addon and + the addon agent resources yaml description. + properties: + addonName: + description: AddonName represents the name of the addon which the + template belongs to + type: string + agentSpec: + description: AgentSpec describes what/how the kubernetes resources + of the addon agent to be deployed on a managed cluster. + properties: + deleteOption: + description: DeleteOption represents deletion strategy when the + manifestwork is deleted. Foreground deletion strategy is applied + to all the resource in this manifestwork if it is not set. + properties: + propagationPolicy: + default: Foreground + description: propagationPolicy can be Foreground, Orphan or + SelectivelyOrphan SelectivelyOrphan should be rarely used. It + is provided for cases where particular resources is transfering + ownership from one ManifestWork to another or another management + unit. Setting this value will allow a flow like 1. create + manifestwork/2 to manage foo 2. update manifestwork/1 to + selectively orphan foo 3. remove foo from manifestwork/1 + without impacting continuity because manifestwork/2 adopts + it. + enum: + - Foreground + - Orphan + - SelectivelyOrphan + type: string + selectivelyOrphans: + description: selectivelyOrphan represents a list of resources + following orphan deletion stratecy + properties: + orphaningRules: + description: orphaningRules defines a slice of orphaningrule. 
+ Each orphaningrule identifies a single resource included + in this manifestwork + items: + description: OrphaningRule identifies a single resource + included in this manifestwork to be orphaned + properties: + group: + description: Group is the API Group of the Kubernetes + resource, empty string indicates it is in core + group. + type: string + name: + description: Name is the name of the Kubernetes + resource. + type: string + namespace: + description: Name is the namespace of the Kubernetes + resource, empty string indicates it is a cluster + scoped resource. + type: string + resource: + description: Resource is the resource name of the + Kubernetes resource. + type: string + required: + - name + - resource + type: object + type: array + type: object + type: object + executor: + description: Executor is the configuration that makes the work + agent to perform some pre-request processing/checking. e.g. + the executor identity tells the work agent to check the executor + has sufficient permission to write the workloads to the local + managed cluster. Note that nil executor is still supported for + backward-compatibility which indicates that the work agent will + not perform any additional actions before applying resources. + properties: + subject: + description: Subject is the subject identity which the work + agent uses to talk to the local cluster when applying the + resources. + properties: + serviceAccount: + description: ServiceAccount is for identifying which service + account to use by the work agent. Only required if the + type is "ServiceAccount". + properties: + name: + description: Name is the name of the service account. + maxLength: 253 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)$ + type: string + namespace: + description: Namespace is the namespace of the service + account. + maxLength: 253 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)$ + type: string + required: + - name + - namespace + type: object + type: + description: 'Type is the type of the subject identity. + Supported types are: "ServiceAccount".' + enum: + - ServiceAccount + type: string + required: + - type + type: object + type: object + manifestConfigs: + description: ManifestConfigs represents the configurations of + manifests defined in workload field. + items: + description: ManifestConfigOption represents the configurations + of a manifest defined in workload field. + properties: + feedbackRules: + description: FeedbackRules defines what resource status + field should be returned. If it is not set or empty, no + feedback rules will be honored. + items: + properties: + jsonPaths: + description: JsonPaths defines the json path under + status field to be synced. + items: + properties: + name: + description: Name represents the alias name + for this field + type: string + path: + description: Path represents the json path of + the field under status. The path must point + to a field with single value in the type of + integer, bool or string. If the path points + to a non-existing field, no value will be + returned. If the path points to a structure, + map or slice, no value will be returned and + the status conddition of StatusFeedBackSynced + will be set as false. Ref to https://kubernetes.io/docs/reference/kubectl/jsonpath/ + on how to write a jsonPath. + type: string + version: + description: Version is the version of the Kubernetes + resource. 
If it is not specified, the resource + with the semantically latest version is used + to resolve the path. + type: string + required: + - name + - path + type: object + type: array + type: + description: Type defines the option of how status + can be returned. It can be jsonPaths or wellKnownStatus. + If the type is JSONPaths, user should specify the + jsonPaths field If the type is WellKnownStatus, + certain common fields of status defined by a rule + only for types in in k8s.io/api and open-cluster-management/api + will be reported, If these status fields do not + exist, no values will be reported. + enum: + - WellKnownStatus + - JSONPaths + type: string + required: + - type + type: object + type: array + resourceIdentifier: + description: ResourceIdentifier represents the group, resource, + name and namespace of a resoure. iff this refers to a + resource not created by this manifest work, the related + rules will not be executed. + properties: + group: + description: Group is the API Group of the Kubernetes + resource, empty string indicates it is in core group. + type: string + name: + description: Name is the name of the Kubernetes resource. + type: string + namespace: + description: Name is the namespace of the Kubernetes + resource, empty string indicates it is a cluster scoped + resource. + type: string + resource: + description: Resource is the resource name of the Kubernetes + resource. + type: string + required: + - name + - resource + type: object + updateStrategy: + description: UpdateStrategy defines the strategy to update + this manifest. UpdateStrategy is Update if it is not set. + properties: + serverSideApply: + description: serverSideApply defines the configuration + for server side apply. It is honored only when type + of updateStrategy is ServerSideApply + properties: + fieldManager: + default: work-agent + description: FieldManager is the manager to apply + the resource. It is work-agent by default, but + can be other name with work-agent as the prefix. + pattern: ^work-agent + type: string + force: + description: Force represents to force apply the + manifest. + type: boolean + type: object + type: + default: Update + description: type defines the strategy to update this + manifest, default value is Update. Update type means + to update resource by an update call. CreateOnly type + means do not update resource based on current manifest. + ServerSideApply type means to update resource using + server side apply with work-controller as the field + manager. If there is conflict, the related Applied + condition of manifest will be in the status of False + with the reason of ApplyConflict. + enum: + - Update + - CreateOnly + - ServerSideApply + type: string + required: + - type + type: object + required: + - resourceIdentifier + type: object + type: array + workload: + description: Workload represents the manifest workload to be deployed + on a managed cluster. + properties: + manifests: + description: Manifests represents a list of kuberenetes resources + to be deployed on a managed cluster. + items: + description: Manifest represents a resource to be deployed + on managed cluster. + type: object + x-kubernetes-embedded-resource: true + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object + registration: + description: Registration holds the registration configuration for + the addon + items: + description: RegistrationSpec describes how to register an addon + agent to the hub cluster. 
With the registration defined, The addon + agent can access to kube apiserver with kube style API or other + endpoints on hub cluster with client certificate authentication. + During the addon registration process, a csr will be created for + each Registration on the hub cluster. The CSR will be approved + automatically, After the csr is approved on the hub cluster, the + klusterlet agent will create a secret in the installNamespace + for the addon agent. If the RegistrationType type is KubeClient, + the secret name will be "{addon name}-hub-kubeconfig" whose content + includes key/cert and kubeconfig. Otherwise, If the RegistrationType + type is CustomSigner the secret name will be "{addon name}-{signer + name}-client-cert" whose content includes key/cert. + properties: + customSigner: + description: CustomSigner holds the configuration of the CustomSigner + type registration required when the Type is CustomSigner + properties: + signerName: + description: signerName is the name of signer that addon + agent will use to create csr. + maxLength: 571 + minLength: 5 + type: string + signingCA: + description: 'SigningCA represents the reference of the + secret on the hub cluster to sign the CSR the secret must + be in the namespace where the addon-manager is located, + and the secret type must be "kubernetes.io/tls" Note: + The addon manager will not have permission to access the + secret by default, so the user must grant the permission + to the addon manager(by creating rolebinding for the addon-manager + serviceaccount "addon-manager-controller-sa").' + properties: + name: + description: Name of the signing CA secret + type: string + required: + - name + type: object + subject: + description: 'Subject is the user subject of the addon agent + to be registered to the hub. If it is not set, the addon + agent will have the default subject "subject": { "user": + "system:open-cluster-management:cluster:{clusterName}:addon:{addonName}:agent:{agentName}", + "groups: ["system:open-cluster-management:cluster:{clusterName}:addon:{addonName}", + "system:open-cluster-management:addon:{addonName}", "system:authenticated"] + }' + properties: + groups: + description: groups is the user group of the addon agent. + items: + type: string + type: array + organizationUnit: + description: organizationUnit is the ou of the addon + agent + items: + type: string + type: array + user: + description: user is the user name of the addon agent. + type: string + type: object + required: + - signingCA + type: object + kubeClient: + description: KubeClient holds the configuration of the KubeClient + type registration + properties: + hubPermissions: + description: HubPermissions represent the permission configurations + of the addon agent to access the hub cluster + items: + description: HubPermissionConfig configures the permission + of the addon agent to access the hub cluster. Will create + a RoleBinding in the same namespace as the managedClusterAddon + to bind the user provided ClusterRole/Role to the "system:open-cluster-management:cluster::addon:" + Group. + properties: + roleRef: + description: RoleRef is an reference to the permission + resource. it could be a role or a cluster role, + the user must make sure it exist on the hub cluster. 
+ properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - apiGroup + - kind + - name + type: object + x-kubernetes-map-type: atomic + singleNamespace: + description: SingleNamespace contains the configuration + of SingleNamespace type binding. It is required + when the type is SingleNamespace + properties: + namespace: + type: string + required: + - namespace + type: object + type: + description: 'Type of the permissions setting. It + defines how to bind the roleRef on the hub cluster. + It can be: - CurrentCluster: Bind the roleRef to + the namespace with the same name as the managedCluster. + - SingleNamespace: Bind the roleRef to the namespace + specified by SingleNamespaceBindingConfig.' + enum: + - CurrentCluster + - SingleNamespace + type: string + required: + - roleRef + - type + type: object + type: array + type: object + type: + description: 'Type of the registration configuration, it supports: + - KubeClient: the addon agent can access the hub kube apiserver + with kube style API. the signer name should be "kubernetes.io/kube-apiserver-client". + When this type is used, the KubeClientRegistrationConfig can + be used to define the permission of the addon agent to access + the hub cluster - CustomSigner: the addon agent can access + the hub cluster through user-defined endpoints. When this + type is used, the CustomSignerRegistrationConfig can be used + to define how to issue the client certificate for the addon + agent.' + enum: + - KubeClient + - CustomSigner + type: string + required: + - type + type: object + type: array + required: + - addonName + - agentSpec + type: object + required: + - spec + type: object + served: true + storage: true + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/open-cluster-management.io/api/addon/v1alpha1/register.go b/vendor/open-cluster-management.io/api/addon/v1alpha1/register.go index ac1f3383f..c7124f827 100644 --- a/vendor/open-cluster-management.io/api/addon/v1alpha1/register.go +++ b/vendor/open-cluster-management.io/api/addon/v1alpha1/register.go @@ -36,6 +36,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ManagedClusterAddOnList{}, &AddOnDeploymentConfig{}, &AddOnDeploymentConfigList{}, + &AddOnTemplate{}, + &AddOnTemplateList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil diff --git a/vendor/open-cluster-management.io/api/addon/v1alpha1/types_addondeploymentconfig.go b/vendor/open-cluster-management.io/api/addon/v1alpha1/types_addondeploymentconfig.go index 135c7b2bb..b72ac3a4c 100644 --- a/vendor/open-cluster-management.io/api/addon/v1alpha1/types_addondeploymentconfig.go +++ b/vendor/open-cluster-management.io/api/addon/v1alpha1/types_addondeploymentconfig.go @@ -45,6 +45,18 @@ type AddOnDeploymentConfigSpec struct { // // +optional Registries []ImageMirror `json:"registries,omitempty"` + + // ProxyConfig holds proxy settings for add-on agent on the managed cluster. + // Empty means no proxy settings is available. + // +optional + ProxyConfig ProxyConfig `json:"proxyConfig,omitempty"` + + // AgentInstallNamespace is the namespace where the add-on agent should be installed on the managed cluster. 
+ // +optional + // +kubebuilder:default=open-cluster-management-agent-addon + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + AgentInstallNamespace string `json:"agentInstallNamespace,omitempty"` } // CustomizedVariable represents a customized variable for add-on deployment. @@ -89,6 +101,22 @@ type ImageMirror struct { Source string `json:"source"` } +// ProxyConfig describes the proxy settings for the add-on agent +type ProxyConfig struct { + // HTTPProxy is the URL of the proxy for HTTP requests + // +optional + HTTPProxy string `json:"httpProxy,omitempty"` + + // HTTPSProxy is the URL of the proxy for HTTPS requests + // +optional + HTTPSProxy string `json:"httpsProxy,omitempty"` + + // NoProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy + // should not be used. + // +optional + NoProxy string `json:"noProxy,omitempty"` +} + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // AddOnDeploymentConfigList is a collection of add-on deployment config. type AddOnDeploymentConfigList struct { diff --git a/vendor/open-cluster-management.io/api/addon/v1alpha1/types_addontemplate.go b/vendor/open-cluster-management.io/api/addon/v1alpha1/types_addontemplate.go new file mode 100644 index 000000000..199acc557 --- /dev/null +++ b/vendor/open-cluster-management.io/api/addon/v1alpha1/types_addontemplate.go @@ -0,0 +1,186 @@ +package v1alpha1 + +import ( + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + work "open-cluster-management.io/api/work/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:scope="Cluster" +// +kubebuilder:printcolumn:name="ADDON NAME",type=string,JSONPath=`.spec.addonName` + +// AddOnTemplate is the Custom Resource object, it is used to describe +// how to deploy the addon agent and how to register the addon. +// +// AddOnTemplate is a cluster-scoped resource, and will only be used +// on the hub cluster. +type AddOnTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // spec holds the registration configuration for the addon and the + // addon agent resources yaml description. + // +kubebuilder:validation:Required + // +required + Spec AddOnTemplateSpec `json:"spec"` +} + +// AddOnTemplateSpec defines the template of an addon agent which will be deployed on managed clusters. +type AddOnTemplateSpec struct { + // AddonName represents the name of the addon which the template belongs to + // +kubebuilder:validation:Required + // +required + AddonName string `json:"addonName"` + + // AgentSpec describes what/how the kubernetes resources of the addon agent to be deployed on a managed cluster. + // +kubebuilder:validation:Required + // +required + AgentSpec work.ManifestWorkSpec `json:"agentSpec"` + + // Registration holds the registration configuration for the addon + // +optional + Registration []RegistrationSpec `json:"registration"` +} + +// RegistrationType represents the type of the registration configuration, +// it could be KubeClient or CustomSigner +type RegistrationType string + +// HubPermissionsBindingType represent how to bind permission resources(role/clusterrole) +// on the hub cluster for the addon agent +type HubPermissionsBindingType string + +const ( + // RegistrationTypeKubeClient represents the KubeClient type registration of the addon agent. 
+ // For this type, the addon agent can access the hub kube apiserver with kube style API. + // The signer name should be "kubernetes.io/kube-apiserver-client". + RegistrationTypeKubeClient RegistrationType = "KubeClient" + // RegistrationTypeCustomSigner represents the CustomSigner type registration of the addon agent. + // For this type, the addon agent can access the hub cluster through user-defined endpoints. + RegistrationTypeCustomSigner RegistrationType = "CustomSigner" + + // HubPermissionsBindingSingleNamespace means that will only allow the addon agent to access the + // resources in a single user defined namespace on the hub cluster. + HubPermissionsBindingSingleNamespace HubPermissionsBindingType = "SingleNamespace" + // HubPermissionsBindingCurrentCluster means that will only allow the addon agent to access the + // resources in managed cluster namespace on the hub cluster. + // It is a specific case of the SingleNamespace type. + HubPermissionsBindingCurrentCluster HubPermissionsBindingType = "CurrentCluster" +) + +// RegistrationSpec describes how to register an addon agent to the hub cluster. +// With the registration defined, The addon agent can access to kube apiserver with kube style API +// or other endpoints on hub cluster with client certificate authentication. During the addon +// registration process, a csr will be created for each Registration on the hub cluster. The +// CSR will be approved automatically, After the csr is approved on the hub cluster, the klusterlet +// agent will create a secret in the installNamespace for the addon agent. +// If the RegistrationType type is KubeClient, the secret name will be "{addon name}-hub-kubeconfig" +// whose content includes key/cert and kubeconfig. Otherwise, If the RegistrationType type is +// CustomSigner the secret name will be "{addon name}-{signer name}-client-cert" whose content +// includes key/cert. +type RegistrationSpec struct { + // Type of the registration configuration, it supports: + // - KubeClient: the addon agent can access the hub kube apiserver with kube style API. + // the signer name should be "kubernetes.io/kube-apiserver-client". When this type is + // used, the KubeClientRegistrationConfig can be used to define the permission of the + // addon agent to access the hub cluster + // - CustomSigner: the addon agent can access the hub cluster through user-defined endpoints. + // When this type is used, the CustomSignerRegistrationConfig can be used to define how + // to issue the client certificate for the addon agent. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum:=KubeClient;CustomSigner + Type RegistrationType `json:"type"` + + // KubeClient holds the configuration of the KubeClient type registration + // +optional + KubeClient *KubeClientRegistrationConfig `json:"kubeClient,omitempty"` + + // CustomSigner holds the configuration of the CustomSigner type registration + // required when the Type is CustomSigner + CustomSigner *CustomSignerRegistrationConfig `json:"customSigner,omitempty"` +} + +type KubeClientRegistrationConfig struct { + // HubPermissions represent the permission configurations of the addon agent to access the hub cluster + // +optional + HubPermissions []HubPermissionConfig `json:"hubPermissions,omitempty"` +} + +// HubPermissionConfig configures the permission of the addon agent to access the hub cluster. 
+// Will create a RoleBinding in the same namespace as the managedClusterAddon to bind the user +// provided ClusterRole/Role to the "system:open-cluster-management:cluster::addon:" +// Group. +type HubPermissionConfig struct { + // Type of the permissions setting. It defines how to bind the roleRef on the hub cluster. It can be: + // - CurrentCluster: Bind the roleRef to the namespace with the same name as the managedCluster. + // - SingleNamespace: Bind the roleRef to the namespace specified by SingleNamespaceBindingConfig. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum:=CurrentCluster;SingleNamespace + Type HubPermissionsBindingType `json:"type"` + + // RoleRef is an reference to the permission resource. it could be a role or a cluster role, + // the user must make sure it exist on the hub cluster. + // +kubebuilder:validation:Required + RoleRef rbacv1.RoleRef `json:"roleRef"` + + // SingleNamespace contains the configuration of SingleNamespace type binding. + // It is required when the type is SingleNamespace + SingleNamespace *SingleNamespaceBindingConfig `json:"singleNamespace,omitempty"` +} + +type SingleNamespaceBindingConfig struct { + // +kubebuilder:validation:Required + Namespace string `json:"namespace"` +} + +type CustomSignerRegistrationConfig struct { + // signerName is the name of signer that addon agent will use to create csr. + // +required + // +kubebuilder:validation:MaxLength=571 + // +kubebuilder:validation:MinLength=5 + SignerName string `json:"signerName"` + + // Subject is the user subject of the addon agent to be registered to the hub. + // If it is not set, the addon agent will have the default subject + // "subject": { + // "user": "system:open-cluster-management:cluster:{clusterName}:addon:{addonName}:agent:{agentName}", + // "groups: ["system:open-cluster-management:cluster:{clusterName}:addon:{addonName}", + // "system:open-cluster-management:addon:{addonName}", "system:authenticated"] + // } + Subject *Subject `json:"subject,omitempty"` + + // SigningCA represents the reference of the secret on the hub cluster to sign the CSR + // the secret must be in the namespace where the addon-manager is located, and the secret + // type must be "kubernetes.io/tls" + // Note: The addon manager will not have permission to access the secret by default, so + // the user must grant the permission to the addon manager(by creating rolebinding for + // the addon-manager serviceaccount "addon-manager-controller-sa"). + // +kubebuilder:validation:Required + SigningCA SigningCARef `json:"signingCA"` +} + +// SigningCARef is the reference to the signing CA secret which type must be "kubernetes.io/tls" and +// which namespace must be the same as the addon-manager. +type SigningCARef struct { + // Name of the signing CA secret + // +kubebuilder:validation:Required + Name string `json:"name"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// AddOnTemplateList is a collection of addon templates. +type AddOnTemplateList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is a list of addon templates. 
+ Items []AddOnTemplate `json:"items"` +} diff --git a/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go b/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go index 1df030e3e..abc458aad 100644 --- a/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go +++ b/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go @@ -43,7 +43,7 @@ type ManagedClusterAddOnSpec struct { // configs is a list of add-on configurations. // In scenario where the current add-on has its own configurations. - // An empty list means there are no defautl configurations for add-on. + // An empty list means there are no default configurations for add-on. // The default is an empty list // +optional Configs []AddOnConfig `json:"configs,omitempty"` @@ -61,8 +61,9 @@ type RegistrationConfig struct { // subject is the user subject of the addon agent to be registered to the hub. // If it is not set, the addon agent will have the default subject // "subject": { - // "user": "system:open-cluster-management:addon:{addonName}:{clusterName}:{agentName}", - // "groups: ["system:open-cluster-management:addon", "system:open-cluster-management:addon:{addonName}", "system:authenticated"] + // "user": "system:open-cluster-management:cluster:{clusterName}:addon:{addonName}:agent:{agentName}", + // "groups: ["system:open-cluster-management:cluster:{clusterName}:addon:{addonName}", + // "system:open-cluster-management:addon:{addonName}", "system:authenticated"] // } // // +optional diff --git a/vendor/open-cluster-management.io/api/addon/v1alpha1/zz_generated.deepcopy.go b/vendor/open-cluster-management.io/api/addon/v1alpha1/zz_generated.deepcopy.go index a5a63a9bb..bcb765747 100644 --- a/vendor/open-cluster-management.io/api/addon/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/open-cluster-management.io/api/addon/v1alpha1/zz_generated.deepcopy.go @@ -107,6 +107,7 @@ func (in *AddOnDeploymentConfigSpec) DeepCopyInto(out *AddOnDeploymentConfigSpec *out = make([]ImageMirror, len(*in)) copy(*out, *in) } + out.ProxyConfig = in.ProxyConfig return } @@ -136,6 +137,90 @@ func (in *AddOnMeta) DeepCopy() *AddOnMeta { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddOnTemplate) DeepCopyInto(out *AddOnTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnTemplate. +func (in *AddOnTemplate) DeepCopy() *AddOnTemplate { + if in == nil { + return nil + } + out := new(AddOnTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AddOnTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AddOnTemplateList) DeepCopyInto(out *AddOnTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AddOnTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnTemplateList. +func (in *AddOnTemplateList) DeepCopy() *AddOnTemplateList { + if in == nil { + return nil + } + out := new(AddOnTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AddOnTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddOnTemplateSpec) DeepCopyInto(out *AddOnTemplateSpec) { + *out = *in + in.AgentSpec.DeepCopyInto(&out.AgentSpec) + if in.Registration != nil { + in, out := &in.Registration, &out.Registration + *out = make([]RegistrationSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnTemplateSpec. +func (in *AddOnTemplateSpec) DeepCopy() *AddOnTemplateSpec { + if in == nil { + return nil + } + out := new(AddOnTemplateSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterManagementAddOn) DeepCopyInto(out *ClusterManagementAddOn) { *out = *in @@ -368,6 +453,28 @@ func (in *ConfigSpecHash) DeepCopy() *ConfigSpecHash { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomSignerRegistrationConfig) DeepCopyInto(out *CustomSignerRegistrationConfig) { + *out = *in + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(Subject) + (*in).DeepCopyInto(*out) + } + out.SigningCA = in.SigningCA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomSignerRegistrationConfig. +func (in *CustomSignerRegistrationConfig) DeepCopy() *CustomSignerRegistrationConfig { + if in == nil { + return nil + } + out := new(CustomSignerRegistrationConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CustomizedVariable) DeepCopyInto(out *CustomizedVariable) { *out = *in @@ -422,6 +529,28 @@ func (in *HealthCheck) DeepCopy() *HealthCheck { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HubPermissionConfig) DeepCopyInto(out *HubPermissionConfig) { + *out = *in + out.RoleRef = in.RoleRef + if in.SingleNamespace != nil { + in, out := &in.SingleNamespace, &out.SingleNamespace + *out = new(SingleNamespaceBindingConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubPermissionConfig. 
+func (in *HubPermissionConfig) DeepCopy() *HubPermissionConfig { + if in == nil { + return nil + } + out := new(HubPermissionConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageMirror) DeepCopyInto(out *ImageMirror) { *out = *in @@ -524,6 +653,29 @@ func (in *InstallStrategy) DeepCopy() *InstallStrategy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeClientRegistrationConfig) DeepCopyInto(out *KubeClientRegistrationConfig) { + *out = *in + if in.HubPermissions != nil { + in, out := &in.HubPermissions, &out.HubPermissions + *out = make([]HubPermissionConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeClientRegistrationConfig. +func (in *KubeClientRegistrationConfig) DeepCopy() *KubeClientRegistrationConfig { + if in == nil { + return nil + } + out := new(KubeClientRegistrationConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ManagedClusterAddOn) DeepCopyInto(out *ManagedClusterAddOn) { *out = *in @@ -741,6 +893,22 @@ func (in *PlacementStrategy) DeepCopy() *PlacementStrategy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. +func (in *ProxyConfig) DeepCopy() *ProxyConfig { + if in == nil { + return nil + } + out := new(ProxyConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RegistrationConfig) DeepCopyInto(out *RegistrationConfig) { *out = *in @@ -758,6 +926,32 @@ func (in *RegistrationConfig) DeepCopy() *RegistrationConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistrationSpec) DeepCopyInto(out *RegistrationSpec) { + *out = *in + if in.KubeClient != nil { + in, out := &in.KubeClient, &out.KubeClient + *out = new(KubeClientRegistrationConfig) + (*in).DeepCopyInto(*out) + } + if in.CustomSigner != nil { + in, out := &in.CustomSigner, &out.CustomSigner + *out = new(CustomSignerRegistrationConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrationSpec. +func (in *RegistrationSpec) DeepCopy() *RegistrationSpec { + if in == nil { + return nil + } + out := new(RegistrationSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RollingUpdate) DeepCopyInto(out *RollingUpdate) { *out = *in @@ -819,6 +1013,38 @@ func (in *RolloutStrategy) DeepCopy() *RolloutStrategy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SigningCARef) DeepCopyInto(out *SigningCARef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SigningCARef. +func (in *SigningCARef) DeepCopy() *SigningCARef { + if in == nil { + return nil + } + out := new(SigningCARef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingleNamespaceBindingConfig) DeepCopyInto(out *SingleNamespaceBindingConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleNamespaceBindingConfig. +func (in *SingleNamespaceBindingConfig) DeepCopy() *SingleNamespaceBindingConfig { + if in == nil { + return nil + } + out := new(SingleNamespaceBindingConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Subject) DeepCopyInto(out *Subject) { *out = *in diff --git a/vendor/open-cluster-management.io/api/addon/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/open-cluster-management.io/api/addon/v1alpha1/zz_generated.swagger_doc_generated.go index a87d874c8..2d004eecc 100644 --- a/vendor/open-cluster-management.io/api/addon/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/open-cluster-management.io/api/addon/v1alpha1/zz_generated.swagger_doc_generated.go @@ -31,9 +31,11 @@ func (AddOnDeploymentConfigList) SwaggerDoc() map[string]string { } var map_AddOnDeploymentConfigSpec = map[string]string{ - "customizedVariables": "CustomizedVariables is a list of name-value variables for the current add-on deployment. The add-on implementation can use these variables to render its add-on deployment. The default is an empty list.", - "nodePlacement": "NodePlacement enables explicit control over the scheduling of the add-on agents on the managed cluster. All add-on agent pods are expected to comply with this node placement. If the placement is nil, the placement is not specified, it will be omitted. If the placement is an empty object, the placement will match all nodes and tolerate nothing.", - "registries": "Registries describes how to override images used by the addon agent on the managed cluster. the following example will override image \"quay.io/open-cluster-management/addon-agent\" to \"quay.io/ocm/addon-agent\" when deploying the addon agent\n\nregistries:\n - source: quay.io/open-cluster-management/addon-agent\n mirror: quay.io/ocm/addon-agent", + "customizedVariables": "CustomizedVariables is a list of name-value variables for the current add-on deployment. The add-on implementation can use these variables to render its add-on deployment. The default is an empty list.", + "nodePlacement": "NodePlacement enables explicit control over the scheduling of the add-on agents on the managed cluster. All add-on agent pods are expected to comply with this node placement. If the placement is nil, the placement is not specified, it will be omitted. If the placement is an empty object, the placement will match all nodes and tolerate nothing.", + "registries": "Registries describes how to override images used by the addon agent on the managed cluster. 
the following example will override image \"quay.io/open-cluster-management/addon-agent\" to \"quay.io/ocm/addon-agent\" when deploying the addon agent\n\nregistries:\n - source: quay.io/open-cluster-management/addon-agent\n mirror: quay.io/ocm/addon-agent", + "proxyConfig": "ProxyConfig holds proxy settings for add-on agent on the managed cluster. Empty means no proxy settings is available.", + "agentInstallNamespace": "AgentInstallNamespace is the namespace where the add-on agent should be installed on the managed cluster.", } func (AddOnDeploymentConfigSpec) SwaggerDoc() map[string]string { @@ -70,6 +72,96 @@ func (NodePlacement) SwaggerDoc() map[string]string { return map_NodePlacement } +var map_ProxyConfig = map[string]string{ + "": "ProxyConfig describes the proxy settings for the add-on agent", + "httpProxy": "HTTPProxy is the URL of the proxy for HTTP requests", + "httpsProxy": "HTTPSProxy is the URL of the proxy for HTTPS requests", + "noProxy": "NoProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used.", +} + +func (ProxyConfig) SwaggerDoc() map[string]string { + return map_ProxyConfig +} + +var map_AddOnTemplate = map[string]string{ + "": "AddOnTemplate is the Custom Resource object, it is used to describe how to deploy the addon agent and how to register the addon.\n\nAddOnTemplate is a cluster-scoped resource, and will only be used on the hub cluster.", + "spec": "spec holds the registration configuration for the addon and the addon agent resources yaml description.", +} + +func (AddOnTemplate) SwaggerDoc() map[string]string { + return map_AddOnTemplate +} + +var map_AddOnTemplateList = map[string]string{ + "": "AddOnTemplateList is a collection of addon templates.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "Items is a list of addon templates.", +} + +func (AddOnTemplateList) SwaggerDoc() map[string]string { + return map_AddOnTemplateList +} + +var map_AddOnTemplateSpec = map[string]string{ + "": "AddOnTemplateSpec defines the template of an addon agent which will be deployed on managed clusters.", + "addonName": "AddonName represents the name of the addon which the template belongs to", + "agentSpec": "AgentSpec describes what/how the kubernetes resources of the addon agent to be deployed on a managed cluster.", + "registration": "Registration holds the registration configuration for the addon", +} + +func (AddOnTemplateSpec) SwaggerDoc() map[string]string { + return map_AddOnTemplateSpec +} + +var map_CustomSignerRegistrationConfig = map[string]string{ + "signerName": "signerName is the name of signer that addon agent will use to create csr.", + "subject": "Subject is the user subject of the addon agent to be registered to the hub. 
If it is not set, the addon agent will have the default subject \"subject\": {\n \"user\": \"system:open-cluster-management:cluster:{clusterName}:addon:{addonName}:agent:{agentName}\",\n \"groups: [\"system:open-cluster-management:cluster:{clusterName}:addon:{addonName}\",\n \"system:open-cluster-management:addon:{addonName}\", \"system:authenticated\"]\n}", + "signingCA": "SigningCA represents the reference of the secret on the hub cluster to sign the CSR the secret must be in the namespace where the addon-manager is located, and the secret type must be \"kubernetes.io/tls\" Note: The addon manager will not have permission to access the secret by default, so the user must grant the permission to the addon manager(by creating rolebinding for the addon-manager serviceaccount \"addon-manager-controller-sa\").", +} + +func (CustomSignerRegistrationConfig) SwaggerDoc() map[string]string { + return map_CustomSignerRegistrationConfig +} + +var map_HubPermissionConfig = map[string]string{ + "": "HubPermissionConfig configures the permission of the addon agent to access the hub cluster. Will create a RoleBinding in the same namespace as the managedClusterAddon to bind the user provided ClusterRole/Role to the \"system:open-cluster-management:cluster::addon:\" Group.", + "type": "Type of the permissions setting. It defines how to bind the roleRef on the hub cluster. It can be: - CurrentCluster: Bind the roleRef to the namespace with the same name as the managedCluster. - SingleNamespace: Bind the roleRef to the namespace specified by SingleNamespaceBindingConfig.", + "roleRef": "RoleRef is an reference to the permission resource. it could be a role or a cluster role, the user must make sure it exist on the hub cluster.", + "singleNamespace": "SingleNamespace contains the configuration of SingleNamespace type binding. It is required when the type is SingleNamespace", +} + +func (HubPermissionConfig) SwaggerDoc() map[string]string { + return map_HubPermissionConfig +} + +var map_KubeClientRegistrationConfig = map[string]string{ + "hubPermissions": "HubPermissions represent the permission configurations of the addon agent to access the hub cluster", +} + +func (KubeClientRegistrationConfig) SwaggerDoc() map[string]string { + return map_KubeClientRegistrationConfig +} + +var map_RegistrationSpec = map[string]string{ + "": "RegistrationSpec describes how to register an addon agent to the hub cluster. With the registration defined, The addon agent can access to kube apiserver with kube style API or other endpoints on hub cluster with client certificate authentication. During the addon registration process, a csr will be created for each Registration on the hub cluster. The CSR will be approved automatically, After the csr is approved on the hub cluster, the klusterlet agent will create a secret in the installNamespace for the addon agent. If the RegistrationType type is KubeClient, the secret name will be \"{addon name}-hub-kubeconfig\" whose content includes key/cert and kubeconfig. Otherwise, If the RegistrationType type is CustomSigner the secret name will be \"{addon name}-{signer name}-client-cert\" whose content includes key/cert.", + "type": "Type of the registration configuration, it supports: - KubeClient: the addon agent can access the hub kube apiserver with kube style API.\n the signer name should be \"kubernetes.io/kube-apiserver-client\". 
When this type is\n used, the KubeClientRegistrationConfig can be used to define the permission of the\n addon agent to access the hub cluster\n- CustomSigner: the addon agent can access the hub cluster through user-defined endpoints.\n When this type is used, the CustomSignerRegistrationConfig can be used to define how\n to issue the client certificate for the addon agent.", + "kubeClient": "KubeClient holds the configuration of the KubeClient type registration", + "customSigner": "CustomSigner holds the configuration of the CustomSigner type registration required when the Type is CustomSigner", +} + +func (RegistrationSpec) SwaggerDoc() map[string]string { + return map_RegistrationSpec +} + +var map_SigningCARef = map[string]string{ + "": "SigningCARef is the reference to the signing CA secret which type must be \"kubernetes.io/tls\" and which namespace must be the same as the addon-manager.", + "name": "Name of the signing CA secret", +} + +func (SigningCARef) SwaggerDoc() map[string]string { + return map_SigningCARef +} + var map_AddOnMeta = map[string]string{ "": "AddOnMeta represents a collection of metadata information for the add-on.", "displayName": "displayName represents the name of add-on that will be displayed.", @@ -297,7 +389,7 @@ func (ManagedClusterAddOnList) SwaggerDoc() map[string]string { var map_ManagedClusterAddOnSpec = map[string]string{ "": "ManagedClusterAddOnSpec defines the install configuration of an addon agent on managed cluster.", "installNamespace": "installNamespace is the namespace on the managed cluster to install the addon agent. If it is not set, open-cluster-management-agent-addon namespace is used to install the addon agent.", - "configs": "configs is a list of add-on configurations. In scenario where the current add-on has its own configurations. An empty list means there are no defautl configurations for add-on. The default is an empty list", + "configs": "configs is a list of add-on configurations. In scenario where the current add-on has its own configurations. An empty list means there are no default configurations for add-on. The default is an empty list", } func (ManagedClusterAddOnSpec) SwaggerDoc() map[string]string { @@ -336,7 +428,7 @@ func (ObjectReference) SwaggerDoc() map[string]string { var map_RegistrationConfig = map[string]string{ "": "RegistrationConfig defines the configuration of the addon agent to register to hub. The Klusterlet agent will create a csr for the addon agent with the registrationConfig.", "signerName": "signerName is the name of signer that addon agent will use to create csr.", - "subject": "subject is the user subject of the addon agent to be registered to the hub. If it is not set, the addon agent will have the default subject \"subject\": {\n\t\"user\": \"system:open-cluster-management:addon:{addonName}:{clusterName}:{agentName}\",\n\t\"groups: [\"system:open-cluster-management:addon\", \"system:open-cluster-management:addon:{addonName}\", \"system:authenticated\"]\n}", + "subject": "subject is the user subject of the addon agent to be registered to the hub. 
If it is not set, the addon agent will have the default subject \"subject\": {\n \"user\": \"system:open-cluster-management:cluster:{clusterName}:addon:{addonName}:agent:{agentName}\",\n \"groups: [\"system:open-cluster-management:cluster:{clusterName}:addon:{addonName}\",\n \"system:open-cluster-management:addon:{addonName}\", \"system:authenticated\"]\n}", } func (RegistrationConfig) SwaggerDoc() map[string]string { diff --git a/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/addon_client.go b/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/addon_client.go index 175a2f4a2..40b4b6acd 100644 --- a/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/addon_client.go +++ b/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/addon_client.go @@ -13,6 +13,7 @@ import ( type AddonV1alpha1Interface interface { RESTClient() rest.Interface AddOnDeploymentConfigsGetter + AddOnTemplatesGetter ClusterManagementAddOnsGetter ManagedClusterAddOnsGetter } @@ -26,6 +27,10 @@ func (c *AddonV1alpha1Client) AddOnDeploymentConfigs(namespace string) AddOnDepl return newAddOnDeploymentConfigs(c, namespace) } +func (c *AddonV1alpha1Client) AddOnTemplates() AddOnTemplateInterface { + return newAddOnTemplates(c) +} + func (c *AddonV1alpha1Client) ClusterManagementAddOns() ClusterManagementAddOnInterface { return newClusterManagementAddOns(c) } diff --git a/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/addontemplate.go b/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/addontemplate.go new file mode 100644 index 000000000..9cd0fb722 --- /dev/null +++ b/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/addontemplate.go @@ -0,0 +1,152 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + scheme "open-cluster-management.io/api/client/addon/clientset/versioned/scheme" +) + +// AddOnTemplatesGetter has a method to return a AddOnTemplateInterface. +// A group's client should implement this interface. +type AddOnTemplatesGetter interface { + AddOnTemplates() AddOnTemplateInterface +} + +// AddOnTemplateInterface has methods to work with AddOnTemplate resources. 
+type AddOnTemplateInterface interface { + Create(ctx context.Context, addOnTemplate *v1alpha1.AddOnTemplate, opts v1.CreateOptions) (*v1alpha1.AddOnTemplate, error) + Update(ctx context.Context, addOnTemplate *v1alpha1.AddOnTemplate, opts v1.UpdateOptions) (*v1alpha1.AddOnTemplate, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.AddOnTemplate, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.AddOnTemplateList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AddOnTemplate, err error) + AddOnTemplateExpansion +} + +// addOnTemplates implements AddOnTemplateInterface +type addOnTemplates struct { + client rest.Interface +} + +// newAddOnTemplates returns a AddOnTemplates +func newAddOnTemplates(c *AddonV1alpha1Client) *addOnTemplates { + return &addOnTemplates{ + client: c.RESTClient(), + } +} + +// Get takes name of the addOnTemplate, and returns the corresponding addOnTemplate object, and an error if there is any. +func (c *addOnTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.AddOnTemplate, err error) { + result = &v1alpha1.AddOnTemplate{} + err = c.client.Get(). + Resource("addontemplates"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of AddOnTemplates that match those selectors. +func (c *addOnTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AddOnTemplateList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.AddOnTemplateList{} + err = c.client.Get(). + Resource("addontemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested addOnTemplates. +func (c *addOnTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("addontemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a addOnTemplate and creates it. Returns the server's representation of the addOnTemplate, and an error, if there is any. +func (c *addOnTemplates) Create(ctx context.Context, addOnTemplate *v1alpha1.AddOnTemplate, opts v1.CreateOptions) (result *v1alpha1.AddOnTemplate, err error) { + result = &v1alpha1.AddOnTemplate{} + err = c.client.Post(). + Resource("addontemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(addOnTemplate). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a addOnTemplate and updates it. Returns the server's representation of the addOnTemplate, and an error, if there is any. 
+func (c *addOnTemplates) Update(ctx context.Context, addOnTemplate *v1alpha1.AddOnTemplate, opts v1.UpdateOptions) (result *v1alpha1.AddOnTemplate, err error) { + result = &v1alpha1.AddOnTemplate{} + err = c.client.Put(). + Resource("addontemplates"). + Name(addOnTemplate.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(addOnTemplate). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the addOnTemplate and deletes it. Returns an error if one occurs. +func (c *addOnTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("addontemplates"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *addOnTemplates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("addontemplates"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched addOnTemplate. +func (c *addOnTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AddOnTemplate, err error) { + result = &v1alpha1.AddOnTemplate{} + err = c.client.Patch(pt). + Resource("addontemplates"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/generated_expansion.go b/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/generated_expansion.go index c3d0b0d49..06c0c12ad 100644 --- a/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/generated_expansion.go +++ b/vendor/open-cluster-management.io/api/client/addon/clientset/versioned/typed/addon/v1alpha1/generated_expansion.go @@ -4,6 +4,8 @@ package v1alpha1 type AddOnDeploymentConfigExpansion interface{} +type AddOnTemplateExpansion interface{} + type ClusterManagementAddOnExpansion interface{} type ManagedClusterAddOnExpansion interface{} diff --git a/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta1/cluster_client.go b/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta1/cluster_client.go index 552027b3d..42dae5bdd 100644 --- a/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta1/cluster_client.go +++ b/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta1/cluster_client.go @@ -12,8 +12,6 @@ import ( type ClusterV1beta1Interface interface { RESTClient() rest.Interface - ManagedClusterSetsGetter - ManagedClusterSetBindingsGetter PlacementsGetter PlacementDecisionsGetter } @@ -23,14 +21,6 @@ type ClusterV1beta1Client struct { restClient rest.Interface } -func (c *ClusterV1beta1Client) ManagedClusterSets() ManagedClusterSetInterface { - return newManagedClusterSets(c) -} - -func (c *ClusterV1beta1Client) ManagedClusterSetBindings(namespace string) ManagedClusterSetBindingInterface { - return newManagedClusterSetBindings(c, namespace) -} - func (c *ClusterV1beta1Client) Placements(namespace 
string) PlacementInterface { return newPlacements(c, namespace) } diff --git a/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta1/generated_expansion.go b/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta1/generated_expansion.go index a514c2160..701affa7d 100644 --- a/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta1/generated_expansion.go +++ b/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta1/generated_expansion.go @@ -2,10 +2,6 @@ package v1beta1 -type ManagedClusterSetExpansion interface{} - -type ManagedClusterSetBindingExpansion interface{} - type PlacementExpansion interface{} type PlacementDecisionExpansion interface{} diff --git a/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta1/managedclusterset.go b/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta1/managedclusterset.go deleted file mode 100644 index 55fafd650..000000000 --- a/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta1/managedclusterset.go +++ /dev/null @@ -1,168 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - scheme "open-cluster-management.io/api/client/cluster/clientset/versioned/scheme" - v1beta1 "open-cluster-management.io/api/cluster/v1beta1" -) - -// ManagedClusterSetsGetter has a method to return a ManagedClusterSetInterface. -// A group's client should implement this interface. -type ManagedClusterSetsGetter interface { - ManagedClusterSets() ManagedClusterSetInterface -} - -// ManagedClusterSetInterface has methods to work with ManagedClusterSet resources. -type ManagedClusterSetInterface interface { - Create(ctx context.Context, managedClusterSet *v1beta1.ManagedClusterSet, opts v1.CreateOptions) (*v1beta1.ManagedClusterSet, error) - Update(ctx context.Context, managedClusterSet *v1beta1.ManagedClusterSet, opts v1.UpdateOptions) (*v1beta1.ManagedClusterSet, error) - UpdateStatus(ctx context.Context, managedClusterSet *v1beta1.ManagedClusterSet, opts v1.UpdateOptions) (*v1beta1.ManagedClusterSet, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ManagedClusterSet, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ManagedClusterSetList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ManagedClusterSet, err error) - ManagedClusterSetExpansion -} - -// managedClusterSets implements ManagedClusterSetInterface -type managedClusterSets struct { - client rest.Interface -} - -// newManagedClusterSets returns a ManagedClusterSets -func newManagedClusterSets(c *ClusterV1beta1Client) *managedClusterSets { - return &managedClusterSets{ - client: c.RESTClient(), - } -} - -// Get takes name of the managedClusterSet, and returns the corresponding managedClusterSet object, and an error if there is any. 
-func (c *managedClusterSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ManagedClusterSet, err error) { - result = &v1beta1.ManagedClusterSet{} - err = c.client.Get(). - Resource("managedclustersets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ManagedClusterSets that match those selectors. -func (c *managedClusterSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ManagedClusterSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ManagedClusterSetList{} - err = c.client.Get(). - Resource("managedclustersets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested managedClusterSets. -func (c *managedClusterSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("managedclustersets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a managedClusterSet and creates it. Returns the server's representation of the managedClusterSet, and an error, if there is any. -func (c *managedClusterSets) Create(ctx context.Context, managedClusterSet *v1beta1.ManagedClusterSet, opts v1.CreateOptions) (result *v1beta1.ManagedClusterSet, err error) { - result = &v1beta1.ManagedClusterSet{} - err = c.client.Post(). - Resource("managedclustersets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(managedClusterSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a managedClusterSet and updates it. Returns the server's representation of the managedClusterSet, and an error, if there is any. -func (c *managedClusterSets) Update(ctx context.Context, managedClusterSet *v1beta1.ManagedClusterSet, opts v1.UpdateOptions) (result *v1beta1.ManagedClusterSet, err error) { - result = &v1beta1.ManagedClusterSet{} - err = c.client.Put(). - Resource("managedclustersets"). - Name(managedClusterSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(managedClusterSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *managedClusterSets) UpdateStatus(ctx context.Context, managedClusterSet *v1beta1.ManagedClusterSet, opts v1.UpdateOptions) (result *v1beta1.ManagedClusterSet, err error) { - result = &v1beta1.ManagedClusterSet{} - err = c.client.Put(). - Resource("managedclustersets"). - Name(managedClusterSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(managedClusterSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the managedClusterSet and deletes it. Returns an error if one occurs. -func (c *managedClusterSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("managedclustersets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *managedClusterSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("managedclustersets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched managedClusterSet. -func (c *managedClusterSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ManagedClusterSet, err error) { - result = &v1beta1.ManagedClusterSet{} - err = c.client.Patch(pt). - Resource("managedclustersets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta1/managedclustersetbinding.go b/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta1/managedclustersetbinding.go deleted file mode 100644 index 9f81b120f..000000000 --- a/vendor/open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1beta1/managedclustersetbinding.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - scheme "open-cluster-management.io/api/client/cluster/clientset/versioned/scheme" - v1beta1 "open-cluster-management.io/api/cluster/v1beta1" -) - -// ManagedClusterSetBindingsGetter has a method to return a ManagedClusterSetBindingInterface. -// A group's client should implement this interface. -type ManagedClusterSetBindingsGetter interface { - ManagedClusterSetBindings(namespace string) ManagedClusterSetBindingInterface -} - -// ManagedClusterSetBindingInterface has methods to work with ManagedClusterSetBinding resources. 
-type ManagedClusterSetBindingInterface interface { - Create(ctx context.Context, managedClusterSetBinding *v1beta1.ManagedClusterSetBinding, opts v1.CreateOptions) (*v1beta1.ManagedClusterSetBinding, error) - Update(ctx context.Context, managedClusterSetBinding *v1beta1.ManagedClusterSetBinding, opts v1.UpdateOptions) (*v1beta1.ManagedClusterSetBinding, error) - UpdateStatus(ctx context.Context, managedClusterSetBinding *v1beta1.ManagedClusterSetBinding, opts v1.UpdateOptions) (*v1beta1.ManagedClusterSetBinding, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ManagedClusterSetBinding, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ManagedClusterSetBindingList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ManagedClusterSetBinding, err error) - ManagedClusterSetBindingExpansion -} - -// managedClusterSetBindings implements ManagedClusterSetBindingInterface -type managedClusterSetBindings struct { - client rest.Interface - ns string -} - -// newManagedClusterSetBindings returns a ManagedClusterSetBindings -func newManagedClusterSetBindings(c *ClusterV1beta1Client, namespace string) *managedClusterSetBindings { - return &managedClusterSetBindings{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the managedClusterSetBinding, and returns the corresponding managedClusterSetBinding object, and an error if there is any. -func (c *managedClusterSetBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ManagedClusterSetBinding, err error) { - result = &v1beta1.ManagedClusterSetBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("managedclustersetbindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ManagedClusterSetBindings that match those selectors. -func (c *managedClusterSetBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ManagedClusterSetBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ManagedClusterSetBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("managedclustersetbindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested managedClusterSetBindings. -func (c *managedClusterSetBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("managedclustersetbindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a managedClusterSetBinding and creates it. Returns the server's representation of the managedClusterSetBinding, and an error, if there is any. 
-func (c *managedClusterSetBindings) Create(ctx context.Context, managedClusterSetBinding *v1beta1.ManagedClusterSetBinding, opts v1.CreateOptions) (result *v1beta1.ManagedClusterSetBinding, err error) { - result = &v1beta1.ManagedClusterSetBinding{} - err = c.client.Post(). - Namespace(c.ns). - Resource("managedclustersetbindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(managedClusterSetBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a managedClusterSetBinding and updates it. Returns the server's representation of the managedClusterSetBinding, and an error, if there is any. -func (c *managedClusterSetBindings) Update(ctx context.Context, managedClusterSetBinding *v1beta1.ManagedClusterSetBinding, opts v1.UpdateOptions) (result *v1beta1.ManagedClusterSetBinding, err error) { - result = &v1beta1.ManagedClusterSetBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("managedclustersetbindings"). - Name(managedClusterSetBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(managedClusterSetBinding). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *managedClusterSetBindings) UpdateStatus(ctx context.Context, managedClusterSetBinding *v1beta1.ManagedClusterSetBinding, opts v1.UpdateOptions) (result *v1beta1.ManagedClusterSetBinding, err error) { - result = &v1beta1.ManagedClusterSetBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("managedclustersetbindings"). - Name(managedClusterSetBinding.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(managedClusterSetBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the managedClusterSetBinding and deletes it. Returns an error if one occurs. -func (c *managedClusterSetBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("managedclustersetbindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *managedClusterSetBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("managedclustersetbindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched managedClusterSetBinding. -func (c *managedClusterSetBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ManagedClusterSetBinding, err error) { - result = &v1beta1.ManagedClusterSetBinding{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("managedclustersetbindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/open-cluster-management.io/api/cluster/v1/types.go b/vendor/open-cluster-management.io/api/cluster/v1/types.go index 69a3243e1..499ca72d1 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1/types.go +++ b/vendor/open-cluster-management.io/api/cluster/v1/types.go @@ -234,3 +234,26 @@ const ( // ClusterNameLabelKey is the key of a label to set ManagedCluster name. ClusterNameLabelKey = "open-cluster-management.io/cluster-name" ) + +const ( + // ClusterImageRegistriesAnnotationKey is an annotation key on ManagedCluster to configure image override for addons + // running on the ManagedCluster, the value of the annotation should be a json string like this: + // + // { + // "registries": [ + // { + // "source": "quay.io/ocm", + // "mirrors": "quay.io/open-cluster-management" + // } + // ] + // } + // + // Note: Image registries configured in the addonDeploymentConfig will take precedence over this annotation. + ClusterImageRegistriesAnnotationKey = "open-cluster-management.io/image-registries" +) + +const ( + // ManagedClusterFinalizer is the name of the finalizer added to ManagedCluster, it is to ensure that resources + // relating to the ManagedCluster is removed when the ManagedCluster is deleted. + ManagedClusterFinalizer = "cluster.open-cluster-management.io/api-resource-cleanup" +) diff --git a/vendor/open-cluster-management.io/api/cluster/v1alpha1/helpers.go b/vendor/open-cluster-management.io/api/cluster/v1alpha1/helpers.go new file mode 100644 index 000000000..d70865825 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cluster/v1alpha1/helpers.go @@ -0,0 +1,382 @@ +package v1alpha1 + +import ( + "fmt" + "math" + "regexp" + "sort" + "strconv" + "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/clock" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +var RolloutClock = clock.Clock(clock.RealClock{}) +var maxTimeDuration = time.Duration(math.MaxInt64) + +// RolloutStatus represents the status of a rollout operation. +type RolloutStatus int + +const ( + // ToApply indicates that the resource's desired status has not been applied yet. + ToApply RolloutStatus = iota + // Progressing indicates that the resource's desired status is applied and last applied status is not updated. + Progressing + // Succeeded indicates that the resource's desired status is applied and last applied status is successful. + Succeeded + // Failed indicates that the resource's desired status is applied and last applied status has failed. + Failed + // TimeOut indicates that the rollout status is progressing or failed and the status remains + // for longer than the timeout, resulting in a timeout status. + TimeOut + // Skip indicates that the rollout should be skipped on this cluster. + Skip +) + +// ClusterRolloutStatusFunc defines a function to return the rollout status for a managed cluster. +type ClusterRolloutStatusFunc func(clusterName string) ClusterRolloutStatus + +// ClusterRolloutStatus holds the rollout status information for a cluster. +type ClusterRolloutStatus struct { + // GroupKey represents the cluster group key (optional field). + GroupKey clusterv1beta1.GroupKey + // Status is the required field indicating the rollout status. + Status RolloutStatus + // LastTransitionTime is the last transition time of the rollout status (optional field). + // Used to calculate timeout for progressing and failed status. 
+ LastTransitionTime *metav1.Time + // TimeOutTime is the timeout time when the status is progressing or failed (optional field). + TimeOutTime *metav1.Time +} + +// RolloutResult contains the clusters to be rolled out and the clusters that have timed out. +type RolloutResult struct { + // ClustersToRollout is a map where the key is the cluster name and the value is the ClusterRolloutStatus. + ClustersToRollout map[string]ClusterRolloutStatus + // ClustersTimeOut is a map where the key is the cluster name and the value is the ClusterRolloutStatus. + ClustersTimeOut map[string]ClusterRolloutStatus +} + +// +k8s:deepcopy-gen=false +type RolloutHandler struct { + // placement decision tracker + pdTracker *clusterv1beta1.PlacementDecisionClustersTracker +} + +func NewRolloutHandler(pdTracker *clusterv1beta1.PlacementDecisionClustersTracker) (*RolloutHandler, error) { + if pdTracker == nil { + return nil, fmt.Errorf("invalid placement decision tracker %v", pdTracker) + } + + return &RolloutHandler{pdTracker: pdTracker}, nil +} + +// The input is a duck type RolloutStrategy and a ClusterRolloutStatusFunc to return the rollout status on each managed cluster. +// Return the strategy actual take effect and a list of clusters that need to rollout and that are timeout. +// +// ClustersToRollout: If mandatory decision groups are defined in strategy, will return the clusters to rollout in mandatory decision groups first. +// When all the mandatory decision groups rollout successfully, will return the rest of the clusters that need to rollout. +// +// ClustersTimeOut: If the cluster status is Progressing or Failed, and the status lasts longer than timeout defined in strategy, +// will list them RolloutResult.ClustersTimeOut with status TimeOut. +func (r *RolloutHandler) GetRolloutCluster(rolloutStrategy RolloutStrategy, statusFunc ClusterRolloutStatusFunc) (*RolloutStrategy, RolloutResult, error) { + switch rolloutStrategy.Type { + case All: + return r.getRolloutAllClusters(rolloutStrategy, statusFunc) + case Progressive: + return r.getProgressiveClusters(rolloutStrategy, statusFunc) + case ProgressivePerGroup: + return r.getProgressivePerGroupClusters(rolloutStrategy, statusFunc) + default: + return nil, RolloutResult{}, fmt.Errorf("incorrect rollout strategy type %v", rolloutStrategy.Type) + } +} + +func (r *RolloutHandler) getRolloutAllClusters(rolloutStrategy RolloutStrategy, statusFunc ClusterRolloutStatusFunc) (*RolloutStrategy, RolloutResult, error) { + // Prepare the rollout strategy + strategy := RolloutStrategy{Type: All} + strategy.All = rolloutStrategy.All.DeepCopy() + if strategy.All == nil { + strategy.All = &RolloutAll{} + } + + // Parse timeout for the rollout + failureTimeout, err := parseTimeout(strategy.All.Timeout.Timeout) + if err != nil { + return &strategy, RolloutResult{}, err + } + + // Get all clusters and perform progressive rollout + totalClusterGroups := r.pdTracker.ExistingClusterGroupsBesides() + totalClusters := totalClusterGroups.GetClusters().UnsortedList() + rolloutResult := progressivePerCluster(totalClusterGroups, len(totalClusters), failureTimeout, statusFunc) + + return &strategy, rolloutResult, nil +} + +func (r *RolloutHandler) getProgressiveClusters(rolloutStrategy RolloutStrategy, statusFunc ClusterRolloutStatusFunc) (*RolloutStrategy, RolloutResult, error) { + // Prepare the rollout strategy + strategy := RolloutStrategy{Type: Progressive} + strategy.Progressive = rolloutStrategy.Progressive.DeepCopy() + if strategy.Progressive == nil { + strategy.Progressive 
= &RolloutProgressive{} + } + + // Upgrade mandatory decision groups first + groupKeys := decisionGroupsToGroupKeys(strategy.Progressive.MandatoryDecisionGroups.MandatoryDecisionGroups) + clusterGroups := r.pdTracker.ExistingClusterGroups(groupKeys...) + + // Perform progressive rollout for mandatory decision groups + rolloutResult := progressivePerGroup(clusterGroups, maxTimeDuration, statusFunc) + if len(rolloutResult.ClustersToRollout) > 0 { + return &strategy, rolloutResult, nil + } + + // Parse timeout for non-mandatory decision groups + failureTimeout, err := parseTimeout(strategy.Progressive.Timeout.Timeout) + if err != nil { + return &strategy, RolloutResult{}, err + } + + // Calculate the length for progressive rollout + totalClusters := r.pdTracker.ExistingClusterGroupsBesides().GetClusters() + length, err := calculateLength(strategy.Progressive.MaxConcurrency, len(totalClusters)) + if err != nil { + return &strategy, RolloutResult{}, err + } + + // Upgrade the remaining clusters + restClusterGroups := r.pdTracker.ExistingClusterGroupsBesides(clusterGroups.GetOrderedGroupKeys()...) + rolloutResult = progressivePerCluster(restClusterGroups, length, failureTimeout, statusFunc) + + return &strategy, rolloutResult, nil +} + +func (r *RolloutHandler) getProgressivePerGroupClusters(rolloutStrategy RolloutStrategy, statusFunc ClusterRolloutStatusFunc) (*RolloutStrategy, RolloutResult, error) { + // Prepare the rollout strategy + strategy := RolloutStrategy{Type: ProgressivePerGroup} + strategy.ProgressivePerGroup = rolloutStrategy.ProgressivePerGroup.DeepCopy() + if strategy.ProgressivePerGroup == nil { + strategy.ProgressivePerGroup = &RolloutProgressivePerGroup{} + } + + // Upgrade mandatory decision groups first + mandatoryDecisionGroups := strategy.ProgressivePerGroup.MandatoryDecisionGroups.MandatoryDecisionGroups + groupKeys := decisionGroupsToGroupKeys(mandatoryDecisionGroups) + clusterGroups := r.pdTracker.ExistingClusterGroups(groupKeys...) + + // Perform progressive rollout per group for mandatory decision groups + rolloutResult := progressivePerGroup(clusterGroups, maxTimeDuration, statusFunc) + if len(rolloutResult.ClustersToRollout) > 0 { + return &strategy, rolloutResult, nil + } + + // Parse timeout for non-mandatory decision groups + failureTimeout, err := parseTimeout(strategy.ProgressivePerGroup.Timeout.Timeout) + if err != nil { + return &strategy, RolloutResult{}, err + } + + // Upgrade the rest of the decision groups + restClusterGroups := r.pdTracker.ExistingClusterGroupsBesides(clusterGroups.GetOrderedGroupKeys()...) + + // Perform progressive rollout per group for the remaining decision groups + rolloutResult = progressivePerGroup(restClusterGroups, failureTimeout, statusFunc) + return &strategy, rolloutResult, nil +} + +func progressivePerCluster(clusterGroupsMap clusterv1beta1.ClusterGroupsMap, length int, timeout time.Duration, statusFunc ClusterRolloutStatusFunc) RolloutResult { + rolloutClusters := map[string]ClusterRolloutStatus{} + timeoutClusters := map[string]ClusterRolloutStatus{} + + if length == 0 { + return RolloutResult{ + ClustersToRollout: rolloutClusters, + ClustersTimeOut: timeoutClusters, + } + } + + clusters := clusterGroupsMap.GetClusters().UnsortedList() + clusterToGroupKey := clusterGroupsMap.ClusterToGroupKey() + + // Sort the clusters in alphabetical order to ensure consistency. 
+ sort.Strings(clusters) + for _, cluster := range clusters { + status := statusFunc(cluster) + if groupKey, exists := clusterToGroupKey[cluster]; exists { + status.GroupKey = groupKey + } + + newStatus, needToRollout := determineRolloutStatusAndContinue(status, timeout) + status.Status = newStatus.Status + status.TimeOutTime = newStatus.TimeOutTime + + if needToRollout { + rolloutClusters[cluster] = status + } + if status.Status == TimeOut { + timeoutClusters[cluster] = status + } + + if len(rolloutClusters)%length == 0 && len(rolloutClusters) > 0 { + return RolloutResult{ + ClustersToRollout: rolloutClusters, + ClustersTimeOut: timeoutClusters, + } + } + } + + return RolloutResult{ + ClustersToRollout: rolloutClusters, + ClustersTimeOut: timeoutClusters, + } +} + +func progressivePerGroup(clusterGroupsMap clusterv1beta1.ClusterGroupsMap, timeout time.Duration, statusFunc ClusterRolloutStatusFunc) RolloutResult { + rolloutClusters := map[string]ClusterRolloutStatus{} + timeoutClusters := map[string]ClusterRolloutStatus{} + + clusterGroupKeys := clusterGroupsMap.GetOrderedGroupKeys() + + for _, key := range clusterGroupKeys { + if subclusters, ok := clusterGroupsMap[key]; ok { + // Iterate through clusters in the group + for _, cluster := range subclusters.UnsortedList() { + status := statusFunc(cluster) + status.GroupKey = key + + newStatus, needToRollout := determineRolloutStatusAndContinue(status, timeout) + status.Status = newStatus.Status + status.TimeOutTime = newStatus.TimeOutTime + + if needToRollout { + rolloutClusters[cluster] = status + } + if status.Status == TimeOut { + timeoutClusters[cluster] = status + } + } + + // Return if there are clusters to rollout + if len(rolloutClusters) > 0 { + return RolloutResult{ + ClustersToRollout: rolloutClusters, + ClustersTimeOut: timeoutClusters, + } + } + } + } + + return RolloutResult{ + ClustersToRollout: rolloutClusters, + ClustersTimeOut: timeoutClusters, + } +} + +// determineRolloutStatusAndContinue checks whether a cluster should continue its rollout based on +// its current status and timeout. The function returns an updated cluster status and a boolean +// indicating whether the rollout should continue. +// +// The timeout parameter is utilized for handling progressing and failed statuses: +// 1. If timeout is set to None (maxTimeDuration), the function will wait until cluster reaching a success status. +// It returns true to include the cluster in the result and halts the rollout of other clusters or groups. +// 2. If timeout is set to 0, the function proceeds with upgrading other clusters without waiting. +// It returns false to skip waiting for the cluster to reach a success status and continues to rollout others. 
+func determineRolloutStatusAndContinue(status ClusterRolloutStatus, timeout time.Duration) (*ClusterRolloutStatus, bool) { + newStatus := status.DeepCopy() + switch status.Status { + case ToApply: + return newStatus, true + case TimeOut, Succeeded, Skip: + return newStatus, false + case Progressing, Failed: + timeOutTime := getTimeOutTime(status.LastTransitionTime, timeout) + newStatus.TimeOutTime = timeOutTime + + // check if current time is before the timeout time + if RolloutClock.Now().Before(timeOutTime.Time) { + return newStatus, true + } else { + newStatus.Status = TimeOut + return newStatus, false + } + default: + return newStatus, true + } +} + +// get the timeout time +func getTimeOutTime(startTime *metav1.Time, timeout time.Duration) *metav1.Time { + var timeoutTime time.Time + if startTime == nil { + timeoutTime = RolloutClock.Now().Add(timeout) + } else { + timeoutTime = startTime.Add(timeout) + } + return &metav1.Time{Time: timeoutTime} +} + +func calculateLength(maxConcurrency intstr.IntOrString, total int) (int, error) { + length := total + + switch maxConcurrency.Type { + case intstr.Int: + length = maxConcurrency.IntValue() + case intstr.String: + str := maxConcurrency.StrVal + if strings.HasSuffix(str, "%") { + f, err := strconv.ParseFloat(str[:len(str)-1], 64) + if err != nil { + return length, err + } + length = int(math.Ceil(f / 100 * float64(total))) + } else { + return length, fmt.Errorf("%v invalid type: string is not a percentage", maxConcurrency) + } + default: + return length, fmt.Errorf("incorrect MaxConcurrency type %v", maxConcurrency.Type) + } + + if length <= 0 || length > total { + length = total + } + + return length, nil +} + +func parseTimeout(timeoutStr string) (time.Duration, error) { + // Define the regex pattern to match the timeout string + pattern := "^(([0-9])+[h|m|s])|None$" + regex := regexp.MustCompile(pattern) + + if timeoutStr == "None" || timeoutStr == "" { + // If the timeout is "None" or empty, return the maximum duration + return maxTimeDuration, nil + } + + // Check if the timeout string matches the pattern + if !regex.MatchString(timeoutStr) { + return maxTimeDuration, fmt.Errorf("invalid timeout format") + } + + return time.ParseDuration(timeoutStr) +} + +func decisionGroupsToGroupKeys(decisionsGroup []MandatoryDecisionGroup) []clusterv1beta1.GroupKey { + result := []clusterv1beta1.GroupKey{} + for _, d := range decisionsGroup { + gk := clusterv1beta1.GroupKey{} + // GroupName is considered first to select the decisionGroups then GroupIndex. + if d.GroupName != "" { + gk.GroupName = d.GroupName + } else { + gk.GroupIndex = d.GroupIndex + } + result = append(result, gk) + } + return result +} diff --git a/vendor/open-cluster-management.io/api/cluster/v1alpha1/types_rolloutstrategy.go b/vendor/open-cluster-management.io/api/cluster/v1alpha1/types_rolloutstrategy.go new file mode 100644 index 000000000..c9fa38155 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cluster/v1alpha1/types_rolloutstrategy.go @@ -0,0 +1,103 @@ +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/util/intstr" +) + +// +k8s:deepcopy-gen=true + +// RolloutStrategy API used by workload applier APIs to define how the workload will be applied to the selected clusters by the Placement and DecisionStrategy. + +const ( + //All means apply the workload to all clusters in the decision groups at once. + All string = "All" + //Progressive means apply the workload to the selected clusters progressively per cluster. 
+ Progressive string = "Progressive" + //ProgressivePerGroup means apply the workload to the selected clusters progressively per group. + ProgressivePerGroup string = "ProgressivePerGroup" +) + +// Rollout strategy to apply workload to the selected clusters by Placement and DecisionStrategy. +type RolloutStrategy struct { + // Rollout strategy Types are All, Progressive and ProgressivePerGroup + // 1) All means apply the workload to all clusters in the decision groups at once. + // 2) Progressive means apply the workload to the selected clusters progressively per cluster. The workload will not be applied to the next cluster unless one of the current applied clusters reach the successful state or timeout. + // 3) ProgressivePerGroup means apply the workload to decisionGroup clusters progressively per group. The workload will not be applied to the next decisionGroup unless all clusters in the current group reach the successful state or timeout. + // +kubebuilder:validation:Enum=All;Progressive;ProgressivePerGroup + // +kubebuilder:default:=All + // +optional + Type string `json:"type,omitempty"` + + // All define required fields for RolloutStrategy type All + // +optional + All *RolloutAll `json:"all,omitempty"` + + // Progressive define required fields for RolloutStrategy type Progressive + // +optional + Progressive *RolloutProgressive `json:"progressive,omitempty"` + + // ProgressivePerGroup define required fields for RolloutStrategy type ProgressivePerGroup + // +optional + ProgressivePerGroup *RolloutProgressivePerGroup `json:"progressivePerGroup,omitempty"` +} + +// Timeout to consider while applying the workload. +type Timeout struct { + // Timeout define how long workload applier controller will wait till workload reach successful state in the cluster. + // Timeout default value is None meaning the workload applier will not proceed apply workload to other clusters if did not reach the successful state. + // Timeout must be defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m , 360s + // +kubebuilder:validation:Pattern="^(([0-9])+[h|m|s])|None$" + // +kubebuilder:default:=None + // +optional + Timeout string `json:"timeout,omitempty"` +} + +// MandatoryDecisionGroup set the decision group name or group index. +// GroupName is considered first to select the decisionGroups then GroupIndex. +type MandatoryDecisionGroup struct { + // GroupName of the decision group should match the placementDecisions label value with label key cluster.open-cluster-management.io/decision-group-name + // +optional + GroupName string `json:"groupName,omitempty"` + + // GroupIndex of the decision group should match the placementDecisions label value with label key cluster.open-cluster-management.io/decision-group-index + // +optional + GroupIndex int32 `json:"groupIndex,omitempty"` +} + +// MandatoryDecisionGroups +type MandatoryDecisionGroups struct { + // List of the decision groups names or indexes to apply the workload first and fail if workload did not reach successful state. 
+ // GroupName or GroupIndex must match with the decisionGroups defined in the placement's decisionStrategy + // +optional + MandatoryDecisionGroups []MandatoryDecisionGroup `json:"mandatoryDecisionGroups,omitempty"` +} + +// RolloutAll is a RolloutStrategy Type +type RolloutAll struct { + // +optional + Timeout `json:",inline"` +} + +// RolloutProgressivePerGroup is a RolloutStrategy Type +type RolloutProgressivePerGroup struct { + // +optional + MandatoryDecisionGroups `json:",inline"` + + // +optional + Timeout `json:",inline"` +} + +// RolloutProgressive is a RolloutStrategy Type +type RolloutProgressive struct { + // +optional + MandatoryDecisionGroups `json:",inline"` + + // MaxConcurrency is the max number of clusters to deploy workload concurrently. The default value for MaxConcurrency is determined from the clustersPerDecisionGroup defined in the placement->DecisionStrategy. + // +kubebuilder:validation:Pattern="^((100|[0-9]{1,2})%|[0-9]+)$" + // +kubebuilder:validation:XIntOrString + // +optional + MaxConcurrency intstr.IntOrString `json:"maxConcurrency,omitempty"` + + // +optional + Timeout `json:",inline"` +} diff --git a/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.deepcopy.go b/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.deepcopy.go index fd4fa6bee..a72d4c816 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.deepcopy.go @@ -193,3 +193,196 @@ func (in *ClusterClaimSpec) DeepCopy() *ClusterClaimSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRolloutStatus) DeepCopyInto(out *ClusterRolloutStatus) { + *out = *in + out.GroupKey = in.GroupKey + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } + if in.TimeOutTime != nil { + in, out := &in.TimeOutTime, &out.TimeOutTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRolloutStatus. +func (in *ClusterRolloutStatus) DeepCopy() *ClusterRolloutStatus { + if in == nil { + return nil + } + out := new(ClusterRolloutStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MandatoryDecisionGroup) DeepCopyInto(out *MandatoryDecisionGroup) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MandatoryDecisionGroup. +func (in *MandatoryDecisionGroup) DeepCopy() *MandatoryDecisionGroup { + if in == nil { + return nil + } + out := new(MandatoryDecisionGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MandatoryDecisionGroups) DeepCopyInto(out *MandatoryDecisionGroups) { + *out = *in + if in.MandatoryDecisionGroups != nil { + in, out := &in.MandatoryDecisionGroups, &out.MandatoryDecisionGroups + *out = make([]MandatoryDecisionGroup, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MandatoryDecisionGroups. 
+func (in *MandatoryDecisionGroups) DeepCopy() *MandatoryDecisionGroups { + if in == nil { + return nil + } + out := new(MandatoryDecisionGroups) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutAll) DeepCopyInto(out *RolloutAll) { + *out = *in + out.Timeout = in.Timeout + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutAll. +func (in *RolloutAll) DeepCopy() *RolloutAll { + if in == nil { + return nil + } + out := new(RolloutAll) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutProgressive) DeepCopyInto(out *RolloutProgressive) { + *out = *in + in.MandatoryDecisionGroups.DeepCopyInto(&out.MandatoryDecisionGroups) + out.MaxConcurrency = in.MaxConcurrency + out.Timeout = in.Timeout + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutProgressive. +func (in *RolloutProgressive) DeepCopy() *RolloutProgressive { + if in == nil { + return nil + } + out := new(RolloutProgressive) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutProgressivePerGroup) DeepCopyInto(out *RolloutProgressivePerGroup) { + *out = *in + in.MandatoryDecisionGroups.DeepCopyInto(&out.MandatoryDecisionGroups) + out.Timeout = in.Timeout + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutProgressivePerGroup. +func (in *RolloutProgressivePerGroup) DeepCopy() *RolloutProgressivePerGroup { + if in == nil { + return nil + } + out := new(RolloutProgressivePerGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutResult) DeepCopyInto(out *RolloutResult) { + *out = *in + if in.ClustersToRollout != nil { + in, out := &in.ClustersToRollout, &out.ClustersToRollout + *out = make(map[string]ClusterRolloutStatus, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.ClustersTimeOut != nil { + in, out := &in.ClustersTimeOut, &out.ClustersTimeOut + *out = make(map[string]ClusterRolloutStatus, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutResult. +func (in *RolloutResult) DeepCopy() *RolloutResult { + if in == nil { + return nil + } + out := new(RolloutResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RolloutStrategy) DeepCopyInto(out *RolloutStrategy) { + *out = *in + if in.All != nil { + in, out := &in.All, &out.All + *out = new(RolloutAll) + **out = **in + } + if in.Progressive != nil { + in, out := &in.Progressive, &out.Progressive + *out = new(RolloutProgressive) + (*in).DeepCopyInto(*out) + } + if in.ProgressivePerGroup != nil { + in, out := &in.ProgressivePerGroup, &out.ProgressivePerGroup + *out = new(RolloutProgressivePerGroup) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutStrategy. +func (in *RolloutStrategy) DeepCopy() *RolloutStrategy { + if in == nil { + return nil + } + out := new(RolloutStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Timeout) DeepCopyInto(out *Timeout) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Timeout. +func (in *Timeout) DeepCopy() *Timeout { + if in == nil { + return nil + } + out := new(Timeout) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.swagger_doc_generated.go index 864fc58e7..4cadc7a2e 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.swagger_doc_generated.go @@ -78,4 +78,69 @@ func (AddOnPlacementScoreStatus) SwaggerDoc() map[string]string { return map_AddOnPlacementScoreStatus } +var map_MandatoryDecisionGroup = map[string]string{ + "": "MandatoryDecisionGroup set the decision group name or group index. GroupName is considered first to select the decisionGroups then GroupIndex.", + "groupName": "GroupName of the decision group should match the placementDecisions label value with label key cluster.open-cluster-management.io/decision-group-name", + "groupIndex": "GroupIndex of the decision group should match the placementDecisions label value with label key cluster.open-cluster-management.io/decision-group-index", +} + +func (MandatoryDecisionGroup) SwaggerDoc() map[string]string { + return map_MandatoryDecisionGroup +} + +var map_MandatoryDecisionGroups = map[string]string{ + "": "MandatoryDecisionGroups", + "mandatoryDecisionGroups": "List of the decision groups names or indexes to apply the workload first and fail if workload did not reach successful state. GroupName or GroupIndex must match with the decisionGroups defined in the placement's decisionStrategy", +} + +func (MandatoryDecisionGroups) SwaggerDoc() map[string]string { + return map_MandatoryDecisionGroups +} + +var map_RolloutAll = map[string]string{ + "": "RolloutAll is a RolloutStrategy Type", +} + +func (RolloutAll) SwaggerDoc() map[string]string { + return map_RolloutAll +} + +var map_RolloutProgressive = map[string]string{ + "": "RolloutProgressive is a RolloutStrategy Type", + "maxConcurrency": "MaxConcurrency is the max number of clusters to deploy workload concurrently. 
The default value for MaxConcurrency is determined from the clustersPerDecisionGroup defined in the placement->DecisionStrategy.", +} + +func (RolloutProgressive) SwaggerDoc() map[string]string { + return map_RolloutProgressive +} + +var map_RolloutProgressivePerGroup = map[string]string{ + "": "RolloutProgressivePerGroup is a RolloutStrategy Type", +} + +func (RolloutProgressivePerGroup) SwaggerDoc() map[string]string { + return map_RolloutProgressivePerGroup +} + +var map_RolloutStrategy = map[string]string{ + "": "Rollout strategy to apply workload to the selected clusters by Placement and DecisionStrategy.", + "type": "Rollout strategy Types are All, Progressive and ProgressivePerGroup 1) All means apply the workload to all clusters in the decision groups at once. 2) Progressive means apply the workload to the selected clusters progressively per cluster. The workload will not be applied to the next cluster unless one of the current applied clusters reach the successful state or timeout. 3) ProgressivePerGroup means apply the workload to decisionGroup clusters progressively per group. The workload will not be applied to the next decisionGroup unless all clusters in the current group reach the successful state or timeout.", + "all": "All define required fields for RolloutStrategy type All", + "progressive": "Progressive define required fields for RolloutStrategy type Progressive", + "progressivePerGroup": "ProgressivePerGroup define required fields for RolloutStrategy type ProgressivePerGroup", +} + +func (RolloutStrategy) SwaggerDoc() map[string]string { + return map_RolloutStrategy +} + +var map_Timeout = map[string]string{ + "": "Timeout to consider while applying the workload.", + "timeout": "Timeout define how long workload applier controller will wait till workload reach successful state in the cluster. Timeout default value is None meaning the workload applier will not proceed apply workload to other clusters if did not reach the successful state. Timeout must be defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m , 360s", +} + +func (Timeout) SwaggerDoc() map[string]string { + return map_Timeout +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml b/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml deleted file mode 100644 index 5ceb4eae9..000000000 --- a/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml +++ /dev/null @@ -1,207 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: managedclustersets.cluster.open-cluster-management.io -spec: - group: cluster.open-cluster-management.io - names: - kind: ManagedClusterSet - listKind: ManagedClusterSetList - plural: managedclustersets - shortNames: - - mclset - - mclsets - singular: managedclusterset - preserveUnknownFields: false - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=="ClusterSetEmpty")].status - name: Empty - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 - schema: - openAPIV3Schema: - description: "ManagedClusterSet defines a group of ManagedClusters that user's - workload can run on. A workload can be defined to deployed on a ManagedClusterSet, - which mean: 1. 
The workload can run on any ManagedCluster in the ManagedClusterSet - 2. The workload cannot run on any ManagedCluster outside the ManagedClusterSet - 3. The service exposed by the workload can be shared in any ManagedCluster - in the ManagedClusterSet \n In order to assign a ManagedCluster to a certian - ManagedClusterSet, add a label with name `cluster.open-cluster-management.io/clusterset` - on the ManagedCluster to refers to the ManagedClusterSet. User is not allow - to add/remove this label on a ManagedCluster unless they have a RBAC rule - to CREATE on a virtual subresource of managedclustersets/join. In order - to update this label, user must have the permission on both the old and - new ManagedClusterSet." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - default: - clusterSelector: - selectorType: LegacyClusterSetLabel - description: Spec defines the attributes of the ManagedClusterSet - properties: - clusterSelector: - default: - selectorType: LegacyClusterSetLabel - description: ClusterSelector represents a selector of ManagedClusters - properties: - labelSelector: - description: LabelSelector define the general labelSelector which - clusterset will use to select target managedClusters - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If - the operator is In or NotIn, the values array must - be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced - during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A - single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is "key", - the operator is "In", and the values array contains only - "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - selectorType: - default: LegacyClusterSetLabel - description: SelectorType could only be "LegacyClusterSetLabel" - or "LabelSelector" "LegacyClusterSetLabel" means to use label - "cluster.open-cluster-management.io/clusterset:"" to select target clusters. 
"LabelSelector" means use - labelSelector to select target managedClusters - enum: - - LegacyClusterSetLabel - - LabelSelector - type: string - type: object - type: object - status: - description: Status represents the current status of the ManagedClusterSet - properties: - conditions: - description: Conditions contains the different condition statuses - for this ManagedClusterSet. - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml b/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml deleted file mode 100644 index 359f13f26..000000000 --- a/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml +++ /dev/null @@ -1,136 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: managedclustersetbindings.cluster.open-cluster-management.io -spec: - group: cluster.open-cluster-management.io - names: - kind: ManagedClusterSetBinding - listKind: ManagedClusterSetBindingList - plural: managedclustersetbindings - shortNames: - - mclsetbinding - - mclsetbindings - singular: managedclustersetbinding - preserveUnknownFields: false - scope: Namespaced - versions: - - name: v1beta1 - schema: - openAPIV3Schema: - description: ManagedClusterSetBinding projects a ManagedClusterSet into a - certain namespace. User is able to create a ManagedClusterSetBinding in - a namespace and bind it to a ManagedClusterSet if they have an RBAC rule - to CREATE on the virtual subresource of managedclustersets/bind. Workloads - created in the same namespace can only be distributed to ManagedClusters - in ManagedClusterSets bound in this namespace by higher level controllers. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec defines the attributes of ManagedClusterSetBinding. - properties: - clusterSet: - description: ClusterSet is the name of the ManagedClusterSet to bind. - It must match the instance name of the ManagedClusterSetBinding - and cannot change once created. User is allowed to set this field - if they have an RBAC rule to CREATE on the virtual subresource of - managedclustersets/bind. - minLength: 1 - type: string - type: object - status: - description: Status represents the current status of the ManagedClusterSetBinding - properties: - conditions: - description: Conditions contains the different condition statuses - for this ManagedClusterSetBinding. - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. 
--- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_02_clusters.open-cluster-management.io_placements.crd.yaml b/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_02_clusters.open-cluster-management.io_placements.crd.yaml index a483f8725..f2f9f546e 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_02_clusters.open-cluster-management.io_placements.crd.yaml +++ b/vendor/open-cluster-management.io/api/cluster/v1beta1/0000_02_clusters.open-cluster-management.io_placements.crd.yaml @@ -69,6 +69,160 @@ spec: items: type: string type: array + decisionStrategy: + description: DecisionStrategy divide the created placement decision + to groups and define number of clusters per decision group. 
+ properties: + groupStrategy: + description: GroupStrategy define strategies to divide selected + clusters to decision groups. + properties: + clustersPerDecisionGroup: + anyOf: + - type: integer + - type: string + default: 100% + description: "ClustersPerDecisionGroup is a specific number + or percentage of the total selected clusters. The specific + number will divide the placementDecisions to decisionGroups + each group has max number of clusters equal to that specific + number. The percentage will divide the placementDecisions + to decisionGroups each group has max number of clusters + based on the total num of selected clusters and percentage. + ex; for a total 100 clusters selected, ClustersPerDecisionGroup + equal to 20% will divide the placement decision to 5 groups + each group should have 20 clusters. Default is having all + clusters in a single group. \n The predefined decisionGroups + is expected to be a subset of the selected clusters and + the number of items in each group SHOULD be less than ClustersPerDecisionGroup. + Once the number of items exceeds the ClustersPerDecisionGroup, + the decisionGroups will also be be divided into multiple + decisionGroups with same GroupName but different GroupIndex." + pattern: ^((100|[1-9][0-9]{0,1})%|[1-9][0-9]*)$ + x-kubernetes-int-or-string: true + decisionGroups: + description: DecisionGroups represents a list of predefined + groups to put decision results. Decision groups will be + constructed based on the DecisionGroups field at first. + The clusters not included in the DecisionGroups will be + divided to other decision groups afterwards. Each decision + group should not have the number of clusters larger than + the ClustersPerDecisionGroup. + items: + description: DecisionGroup define a subset of clusters that + will be added to placementDecisions with groupName label. + properties: + groupClusterSelector: + description: LabelSelector to select clusters subset + by label. + properties: + claimSelector: + description: ClaimSelector represents a selector + of ManagedClusters by clusterClaims in status + properties: + matchExpressions: + description: matchExpressions is a list of cluster + claim selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + labelSelector: + description: LabelSelector represents a selector + of ManagedClusters by label + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. 
+ type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + groupName: + description: Group name to be added as label value to + the created placement Decisions labels with label + key cluster.open-cluster-management.io/decision-group-name + pattern: ^[a-zA-Z0-9][-A-Za-z0-9_.]{0,61}[a-zA-Z0-9]$ + type: string + required: + - groupClusterSelector + - groupName + type: object + type: array + type: object + type: object numberOfClusters: description: NumberOfClusters represents the desired number of ManagedClusters to be selected which meet the placement requirements. 1) If not @@ -466,6 +620,38 @@ spec: - type type: object type: array + decisionGroups: + description: List of decision groups determined by the placement and + DecisionStrategy. + items: + description: Present decision groups status based on the DecisionStrategy + definition. + properties: + clusterCount: + default: 0 + description: Total number of clusters in the decision group. + Clusters count is equal or less than the clusterPerDecisionGroups + defined in the decision strategy. + format: int32 + type: integer + decisionGroupIndex: + description: Present the decision group index. If there is no + decision strategy defined all placement decisions will be + in group index 0 + format: int32 + type: integer + decisionGroupName: + description: Decision group name that is defined in the DecisionStrategy's + DecisionGroup. 
+ type: string + decisions: + description: List of placement decisions names associated with + the decision group + items: + type: string + type: array + type: object + type: array numberOfSelectedClusters: description: NumberOfSelectedClusters represents the number of selected ManagedClusters diff --git a/vendor/open-cluster-management.io/api/cluster/v1beta1/helpers.go b/vendor/open-cluster-management.io/api/cluster/v1beta1/helpers.go index 27b5ebdfe..5951ee1c7 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1beta1/helpers.go +++ b/vendor/open-cluster-management.io/api/cluster/v1beta1/helpers.go @@ -2,176 +2,261 @@ package v1beta1 import ( "fmt" + "sort" + "strconv" "sync" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" - v1 "open-cluster-management.io/api/cluster/v1" ) -type ManagedClustersGetter interface { - List(selector labels.Selector) (ret []*v1.ManagedCluster, err error) +type PlacementDecisionGetter interface { + List(selector labels.Selector, namespace string) (ret []*PlacementDecision, err error) } -type ManagedClusterSetsGetter interface { - List(selector labels.Selector) (ret []*ManagedClusterSet, err error) +// +k8s:deepcopy-gen=false +type PlacementDecisionClustersTracker struct { + placement *Placement + placementDecisionGetter PlacementDecisionGetter + existingScheduledClusterGroups ClusterGroupsMap + clusterGroupsIndexToName map[int32]string + clusterGroupsNameToIndex map[string][]int32 + lock sync.RWMutex } -type ManagedClusterSetBindingsGetter interface { - List(namespace string, selector labels.Selector) (ret []*ManagedClusterSetBinding, err error) +// +k8s:deepcopy-gen=false +type GroupKey struct { + GroupName string `json:"groupName,omitempty"` + GroupIndex int32 `json:"groupIndex,omitempty"` } -// GetClustersFromClusterSet return the ManagedClusterSet's managedClusters -func GetClustersFromClusterSet(clusterSet *ManagedClusterSet, - clustersGetter ManagedClustersGetter) ([]*v1.ManagedCluster, error) { - var clusters []*v1.ManagedCluster - - if clusterSet == nil { - return nil, nil +// NewPlacementDecisionClustersTracker initializes a PlacementDecisionClustersTracker +// using existing clusters. Clusters are added to the default cluster group with index 0. +// Set existingScheduledClusters to nil if there are no existing clusters. +func NewPlacementDecisionClustersTracker(placement *Placement, pdl PlacementDecisionGetter, existingScheduledClusters sets.Set[string]) *PlacementDecisionClustersTracker { + pdct := &PlacementDecisionClustersTracker{ + placement: placement, + placementDecisionGetter: pdl, + existingScheduledClusterGroups: ClusterGroupsMap{{GroupIndex: 0}: existingScheduledClusters}, } - clusterSelector, err := BuildClusterSelector(clusterSet) - if err != nil { - return nil, err - } - if clusterSelector == nil { - return nil, fmt.Errorf("failed to build ClusterSelector with clusterSet: %v", clusterSet) - } - clusters, err = clustersGetter.List(clusterSelector) - if err != nil { - return nil, fmt.Errorf("failed to list ManagedClusters: %w", err) + // Generate group name indices for the tracker. + pdct.generateGroupsNameIndex() + return pdct +} + +// NewPlacementDecisionClustersTrackerWithGroups initializes a PlacementDecisionClustersTracker +// using existing cluster groups. Set existingScheduledClusterGroups to nil if no groups exist. 
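Editor's note: a minimal usage sketch, not part of the patch, for the tracker constructors above. fakeGetter and trackClusters are hypothetical names; a real consumer would back PlacementDecisionGetter with a PlacementDecision lister or client rather than a canned slice.

package example

import (
	"k8s.io/apimachinery/pkg/labels"

	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
)

// fakeGetter is a hypothetical PlacementDecisionGetter backed by a static slice,
// used only so the example is self-contained.
type fakeGetter struct {
	decisions []*clusterv1beta1.PlacementDecision
}

func (f *fakeGetter) List(selector labels.Selector, namespace string) ([]*clusterv1beta1.PlacementDecision, error) {
	return f.decisions, nil
}

// trackClusters shows the intended call pattern: construct the tracker once,
// then call Get to learn which clusters were added to or removed from the
// placement's decisions since the last call.
func trackClusters(placement *clusterv1beta1.Placement, getter *fakeGetter) error {
	tracker := clusterv1beta1.NewPlacementDecisionClustersTracker(placement, getter, nil)
	added, deleted, err := tracker.Get()
	if err != nil {
		return err
	}
	_, _ = added, deleted // e.g. roll out to added clusters, clean up deleted ones
	return nil
}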
+func NewPlacementDecisionClustersTrackerWithGroups(placement *Placement, pdl PlacementDecisionGetter, existingScheduledClusterGroups ClusterGroupsMap) *PlacementDecisionClustersTracker { + pdct := &PlacementDecisionClustersTracker{ + placement: placement, + placementDecisionGetter: pdl, + existingScheduledClusterGroups: existingScheduledClusterGroups, } - return clusters, nil + + // Generate group name indices for the tracker. + pdct.generateGroupsNameIndex() + return pdct } -// GetClusterSetsOfClusterByCluster return the managedClusterSets of a managedCluster -func GetClusterSetsOfCluster(cluster *v1.ManagedCluster, - clusterSetsGetter ManagedClusterSetsGetter) ([]*ManagedClusterSet, error) { - var returnClusterSets []*ManagedClusterSet +// Get updates the tracker's decisionClusters and returns added and deleted cluster names. +func (pdct *PlacementDecisionClustersTracker) Get() (sets.Set[string], sets.Set[string], error) { + pdct.lock.Lock() + defer pdct.lock.Unlock() - if cluster == nil { - return nil, nil + if pdct.placement == nil || pdct.placementDecisionGetter == nil { + return nil, nil, nil } - allClusterSets, err := clusterSetsGetter.List(labels.Everything()) + // Get the generated PlacementDecisions + decisionSelector := labels.SelectorFromSet(labels.Set{ + PlacementLabel: pdct.placement.Name, + }) + decisions, err := pdct.placementDecisionGetter.List(decisionSelector, pdct.placement.Namespace) if err != nil { - return nil, err + return nil, nil, fmt.Errorf("failed to list PlacementDecisions: %w", err) } - for _, clusterSet := range allClusterSets { - clusterSelector, err := BuildClusterSelector(clusterSet) + + // Get the decision cluster names and groups + newScheduledClusters := sets.New[string]() + newScheduledClusterGroups := map[GroupKey]sets.Set[string]{} + for _, d := range decisions { + groupKey, err := parseGroupKeyFromDecision(d) if err != nil { - return nil, err + return nil, nil, err } - if clusterSelector == nil { - return nil, fmt.Errorf("failed to build ClusterSelector with clusterSet: %v", clusterSet) + + if _, exist := newScheduledClusterGroups[groupKey]; !exist { + newScheduledClusterGroups[groupKey] = sets.New[string]() } - if clusterSelector.Matches(labels.Set(cluster.Labels)) { - returnClusterSets = append(returnClusterSets, clusterSet) + + for _, sd := range d.Status.Decisions { + newScheduledClusters.Insert(sd.ClusterName) + newScheduledClusterGroups[groupKey].Insert(sd.ClusterName) } } - return returnClusterSets, nil + + // Compare the difference + existingScheduledClusters := pdct.existingScheduledClusterGroups.GetClusters() + added := newScheduledClusters.Difference(existingScheduledClusters) + deleted := existingScheduledClusters.Difference(newScheduledClusters) + + // Update the existing decision cluster groups + pdct.existingScheduledClusterGroups = newScheduledClusterGroups + pdct.generateGroupsNameIndex() + + return added, deleted, nil } -func BuildClusterSelector(clusterSet *ManagedClusterSet) (labels.Selector, error) { - if clusterSet == nil { - return nil, nil +func (pdct *PlacementDecisionClustersTracker) generateGroupsNameIndex() { + pdct.clusterGroupsIndexToName = map[int32]string{} + pdct.clusterGroupsNameToIndex = map[string][]int32{} + + for groupkey := range pdct.existingScheduledClusterGroups { + // index to name + pdct.clusterGroupsIndexToName[groupkey.GroupIndex] = groupkey.GroupName + // name to index + if index, exist := pdct.clusterGroupsNameToIndex[groupkey.GroupName]; exist { + pdct.clusterGroupsNameToIndex[groupkey.GroupName] = 
append(index, groupkey.GroupIndex) + } else { + pdct.clusterGroupsNameToIndex[groupkey.GroupName] = []int32{groupkey.GroupIndex} + } } - selectorType := clusterSet.Spec.ClusterSelector.SelectorType - switch selectorType { - case "", LegacyClusterSetLabel: - return labels.SelectorFromSet(labels.Set{ - ClusterSetLabel: clusterSet.Name, - }), nil - case LabelSelector: - return metav1.LabelSelectorAsSelector(clusterSet.Spec.ClusterSelector.LabelSelector) - default: - return nil, fmt.Errorf("selectorType is not right: %s", clusterSet.Spec.ClusterSelector.SelectorType) + // sort index order + for _, index := range pdct.clusterGroupsNameToIndex { + sort.Slice(index, func(i, j int) bool { + return index[i] < index[j] + }) } } -// GetBoundManagedClusterSetBindings returns all bindings that are bounded to clustersets in the given namespace. -func GetBoundManagedClusterSetBindings(namespace string, - clusterSetBindingsGetter ManagedClusterSetBindingsGetter) ([]*ManagedClusterSetBinding, error) { - // get all clusterset bindings under the namespace - bindings, err := clusterSetBindingsGetter.List(namespace, labels.Everything()) - if err != nil { - return nil, err - } +// ExistingClusterGroups returns the tracker's existing decision cluster groups for groups listed in groupKeys. +// Return empty set when groupKeys is empty. +func (pdct *PlacementDecisionClustersTracker) ExistingClusterGroups(groupKeys ...GroupKey) ClusterGroupsMap { + pdct.lock.RLock() + defer pdct.lock.RUnlock() + + resultClusterGroups := make(map[GroupKey]sets.Set[string]) - boundBindings := []*ManagedClusterSetBinding{} - for _, binding := range bindings { - if meta.IsStatusConditionTrue(binding.Status.Conditions, ClusterSetBindingBoundType) { - boundBindings = append(boundBindings, binding) + includeGroupKeys := pdct.fulfillGroupKeys(groupKeys) + for _, groupKey := range includeGroupKeys { + if clusters, found := pdct.existingScheduledClusterGroups[groupKey]; found { + resultClusterGroups[groupKey] = clusters } } - return boundBindings, nil + return resultClusterGroups } -type PlacementDecisionGetter interface { - List(selector labels.Selector, namespace string) (ret []*PlacementDecision, err error) +// ExistingClusterGroupsBesides returns the tracker's existing decision cluster groups except cluster groups listed in groupKeys. +// Return all the clusters when groupKeys is empty. +func (pdct *PlacementDecisionClustersTracker) ExistingClusterGroupsBesides(groupKeys ...GroupKey) ClusterGroupsMap { + pdct.lock.RLock() + defer pdct.lock.RUnlock() + + resultClusterGroups := make(map[GroupKey]sets.Set[string]) + + excludeGroupKeys := pdct.fulfillGroupKeys(groupKeys) + includeGroupKeys := pdct.getGroupKeysBesides(excludeGroupKeys) + for _, groupKey := range includeGroupKeys { + if clusters, found := pdct.existingScheduledClusterGroups[groupKey]; found { + resultClusterGroups[groupKey] = clusters + } + } + + return resultClusterGroups } -// +k8s:deepcopy-gen=false -type PlacementDecisionClustersTracker struct { - placement *Placement - placementDecisionGetter PlacementDecisionGetter - existingScheduledClusters sets.Set[string] - lock sync.RWMutex +// Fulfill the expect groupkeys with group name or group index, the returned groupkeys are ordered by input group name then group index. 
+// For example, the input is []GroupKey{{GroupName: "group1"}, {GroupIndex: 2}}, +// the returned is []GroupKey{{GroupName: "group1", GroupIndex: 0}, {GroupName: "group1", GroupIndex: 1}, {GroupName: "group2", GroupIndex: 2}} +func (pdct *PlacementDecisionClustersTracker) fulfillGroupKeys(groupKeys []GroupKey) []GroupKey { + fulfilledGroupKeys := []GroupKey{} + for _, gk := range groupKeys { + if gk.GroupName != "" { + if indexes, exist := pdct.clusterGroupsNameToIndex[gk.GroupName]; exist { + for _, groupIndex := range indexes { + fulfilledGroupKeys = append(fulfilledGroupKeys, GroupKey{GroupName: gk.GroupName, GroupIndex: groupIndex}) + } + } + } else { + if groupName, exist := pdct.clusterGroupsIndexToName[gk.GroupIndex]; exist { + fulfilledGroupKeys = append(fulfilledGroupKeys, GroupKey{GroupName: groupName, GroupIndex: gk.GroupIndex}) + } + } + } + return fulfilledGroupKeys } -func NewPlacementDecisionClustersTracker(placement *Placement, pdl PlacementDecisionGetter, existingScheduledClusters sets.Set[string]) *PlacementDecisionClustersTracker { - pdct := &PlacementDecisionClustersTracker{ - placement: placement, - placementDecisionGetter: pdl, - existingScheduledClusters: existingScheduledClusters, +func (pdct *PlacementDecisionClustersTracker) getGroupKeysBesides(groupKeyToExclude []GroupKey) []GroupKey { + groupKey := []GroupKey{} + for i := 0; i < len(pdct.clusterGroupsIndexToName); i++ { + gKey := GroupKey{GroupName: pdct.clusterGroupsIndexToName[int32(i)], GroupIndex: int32(i)} + if !containsGroupKey(groupKeyToExclude, gKey) { + groupKey = append(groupKey, gKey) + } } - return pdct + + return groupKey } -// Get() update the tracker's decisionClusters and return the added and deleted cluster names. -func (pdct *PlacementDecisionClustersTracker) Get() (sets.Set[string], sets.Set[string], error) { - pdct.lock.Lock() - defer pdct.lock.Unlock() +// ClusterGroupsMap is a custom type representing a map of group keys to sets of cluster names. +type ClusterGroupsMap map[GroupKey]sets.Set[string] - if pdct.placement == nil || pdct.placementDecisionGetter == nil { - return nil, nil, nil +// GetOrderedGroupKeys returns an ordered slice of GroupKeys, sorted by group index. +func (g ClusterGroupsMap) GetOrderedGroupKeys() []GroupKey { + groupKeys := []GroupKey{} + for groupKey := range g { + groupKeys = append(groupKeys, groupKey) } - // Get the generated PlacementDecisions - decisionSelector := labels.SelectorFromSet(labels.Set{ - PlacementLabel: pdct.placement.Name, + // sort by group index index + sort.Slice(groupKeys, func(i, j int) bool { + return groupKeys[i].GroupIndex < groupKeys[j].GroupIndex }) - decisions, err := pdct.placementDecisionGetter.List(decisionSelector, pdct.placement.Namespace) - if err != nil { - return nil, nil, fmt.Errorf("failed to list PlacementDecisions: %w", err) - } - // Get the decision cluster names - newScheduledClusters := sets.New[string]() - for _, d := range decisions { - for _, sd := range d.Status.Decisions { - newScheduledClusters.Insert(sd.ClusterName) - } + return groupKeys +} + +// GetClusters returns a set containing all clusters from all group sets. 
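Editor's note: a short sketch, not part of the patch, showing the ClusterGroupsMap helpers from this hunk used together. printGroups and the group and cluster names are invented for illustration.

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"

	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
)

func printGroups() {
	// Two decision groups keyed by GroupKey; contents are made up.
	groups := clusterv1beta1.ClusterGroupsMap{
		{GroupName: "canary", GroupIndex: 0}: sets.New("cluster1"),
		{GroupName: "prod", GroupIndex: 1}:   sets.New("cluster2", "cluster3"),
	}

	// Iterate the groups in GroupIndex order.
	for _, key := range groups.GetOrderedGroupKeys() {
		fmt.Println(key.GroupIndex, key.GroupName, sets.List(groups[key]))
	}

	// GetClusters unions all groups; ClusterToGroupKey inverts the map.
	fmt.Println(groups.GetClusters().Len())                       // 3
	fmt.Println(groups.ClusterToGroupKey()["cluster2"].GroupName) // "prod"
}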
+func (g ClusterGroupsMap) GetClusters() sets.Set[string] { + clusterSet := sets.New[string]() + for _, clusterGroup := range g { + clusterSet = clusterSet.Union(clusterGroup) } + return clusterSet +} - // Compare the difference - added := newScheduledClusters.Difference(pdct.existingScheduledClusters) - deleted := pdct.existingScheduledClusters.Difference(newScheduledClusters) +// ClusterToGroupKey returns a mapping of cluster names to their respective group keys. +func (g ClusterGroupsMap) ClusterToGroupKey() map[string]GroupKey { + clusterToGroupKey := map[string]GroupKey{} - // Update the existing decision cluster names - pdct.existingScheduledClusters = newScheduledClusters + for groupKey, clusterGroup := range g { + for c := range clusterGroup { + clusterToGroupKey[c] = groupKey + } + } - return added, deleted, nil + return clusterToGroupKey } -// Existing() returns the tracker's existing decision cluster names. -func (pdct *PlacementDecisionClustersTracker) Existing() sets.Set[string] { - pdct.lock.RLock() - defer pdct.lock.RUnlock() +// Helper function to check if a groupKey is present in the groupKeys slice. +func containsGroupKey(groupKeys []GroupKey, groupKey GroupKey) bool { + for _, gk := range groupKeys { + if gk == groupKey { + return true + } + } + return false +} - return pdct.existingScheduledClusters +func parseGroupKeyFromDecision(d *PlacementDecision) (GroupKey, error) { + groupName := d.Labels[DecisionGroupNameLabel] + groupIndex := d.Labels[DecisionGroupIndexLabel] + groupIndexNum, err := strconv.Atoi(groupIndex) + if err != nil { + return GroupKey{}, fmt.Errorf("incorrect group index: %w", err) + } + return GroupKey{GroupName: groupName, GroupIndex: int32(groupIndexNum)}, nil } diff --git a/vendor/open-cluster-management.io/api/cluster/v1beta1/register.go b/vendor/open-cluster-management.io/api/cluster/v1beta1/register.go index 16a33d15b..0f9156d26 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1beta1/register.go +++ b/vendor/open-cluster-management.io/api/cluster/v1beta1/register.go @@ -30,10 +30,6 @@ func Resource(resource string) schema.GroupResource { // Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(GroupVersion, - &ManagedClusterSet{}, - &ManagedClusterSetList{}, - &ManagedClusterSetBinding{}, - &ManagedClusterSetBindingList{}, &Placement{}, &PlacementList{}, &PlacementDecision{}, diff --git a/vendor/open-cluster-management.io/api/cluster/v1beta1/types_managedclusterset.go b/vendor/open-cluster-management.io/api/cluster/v1beta1/types_managedclusterset.go deleted file mode 100644 index 2bc43f9e0..000000000 --- a/vendor/open-cluster-management.io/api/cluster/v1beta1/types_managedclusterset.go +++ /dev/null @@ -1,99 +0,0 @@ -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// LegacyClusterSetLabel LabelKey -const ClusterSetLabel = "cluster.open-cluster-management.io/clusterset" - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:subresource:status -// +kubebuilder:resource:scope="Cluster",shortName={"mclset","mclsets"} -// +kubebuilder:storageversion -// +kubebuilder:printcolumn:name="Empty",type="string",JSONPath=".status.conditions[?(@.type==\"ClusterSetEmpty\")].status" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" - -// ManagedClusterSet defines a group of ManagedClusters that user's workload can run on. 
-// A workload can be defined to deployed on a ManagedClusterSet, which mean: -// 1. The workload can run on any ManagedCluster in the ManagedClusterSet -// 2. The workload cannot run on any ManagedCluster outside the ManagedClusterSet -// 3. The service exposed by the workload can be shared in any ManagedCluster in the ManagedClusterSet -// -// In order to assign a ManagedCluster to a certian ManagedClusterSet, add a label with name -// `cluster.open-cluster-management.io/clusterset` on the ManagedCluster to refers to the ManagedClusterSet. -// User is not allow to add/remove this label on a ManagedCluster unless they have a RBAC rule to CREATE on -// a virtual subresource of managedclustersets/join. In order to update this label, user must have the permission -// on both the old and new ManagedClusterSet. -type ManagedClusterSet struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the attributes of the ManagedClusterSet - // +kubebuilder:default={clusterSelector: {selectorType: LegacyClusterSetLabel}} - Spec ManagedClusterSetSpec `json:"spec"` - - // Status represents the current status of the ManagedClusterSet - // +optional - Status ManagedClusterSetStatus `json:"status,omitempty"` -} - -// ManagedClusterSetSpec describes the attributes of the ManagedClusterSet -type ManagedClusterSetSpec struct { - // ClusterSelector represents a selector of ManagedClusters - // +optional - // +kubebuilder:default:={selectorType: LegacyClusterSetLabel} - ClusterSelector ManagedClusterSelector `json:"clusterSelector,omitempty"` -} - -// ManagedClusterSelector represents a selector of ManagedClusters -type ManagedClusterSelector struct { - // SelectorType could only be "LegacyClusterSetLabel" or "LabelSelector" - // "LegacyClusterSetLabel" means to use label "cluster.open-cluster-management.io/clusterset:"" to select target clusters. - // "LabelSelector" means use labelSelector to select target managedClusters - // +kubebuilder:validation:Enum=LegacyClusterSetLabel;LabelSelector - // +kubebuilder:default:=LegacyClusterSetLabel - // +required - SelectorType SelectorType `json:"selectorType,omitempty"` - - // LabelSelector define the general labelSelector which clusterset will use to select target managedClusters - // +optional - LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` -} - -type SelectorType string - -const ( - // "LegacyClusterSetLabel" means to use label "cluster.open-cluster-management.io/clusterset:"" to select target clusters. - LegacyClusterSetLabel SelectorType = "LegacyClusterSetLabel" - // "LabelSelector" means use labelSelector to select target managedClusters - LabelSelector SelectorType = "LabelSelector" -) - -// ManagedClusterSetStatus represents the current status of the ManagedClusterSet. -type ManagedClusterSetStatus struct { - // Conditions contains the different condition statuses for this ManagedClusterSet. - Conditions []metav1.Condition `json:"conditions"` -} - -const ( - // ManagedClusterSetConditionEmpty means no ManagedCluster is included in the - // ManagedClusterSet. - ManagedClusterSetConditionEmpty string = "ClusterSetEmpty" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ManagedClusterSetList is a collection of ManagedClusterSet. -type ManagedClusterSetList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - // Items is a list of ManagedClusterSet. - Items []ManagedClusterSet `json:"items"` -} diff --git a/vendor/open-cluster-management.io/api/cluster/v1beta1/types_managedclustersetbinding.go b/vendor/open-cluster-management.io/api/cluster/v1beta1/types_managedclustersetbinding.go deleted file mode 100644 index fed2c17d1..000000000 --- a/vendor/open-cluster-management.io/api/cluster/v1beta1/types_managedclustersetbinding.go +++ /dev/null @@ -1,65 +0,0 @@ -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:subresource:status -// +kubebuilder:resource:scope="Namespaced",shortName={"mclsetbinding","mclsetbindings"} -// +kubebuilder:storageversion - -// ManagedClusterSetBinding projects a ManagedClusterSet into a certain namespace. -// User is able to create a ManagedClusterSetBinding in a namespace and bind it to a -// ManagedClusterSet if they have an RBAC rule to CREATE on the virtual subresource of -// managedclustersets/bind. Workloads created in the same namespace can only be -// distributed to ManagedClusters in ManagedClusterSets bound in this namespace by -// higher level controllers. -type ManagedClusterSetBinding struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the attributes of ManagedClusterSetBinding. - Spec ManagedClusterSetBindingSpec `json:"spec"` - - // Status represents the current status of the ManagedClusterSetBinding - // +optional - Status ManagedClusterSetBindingStatus `json:"status,omitempty"` -} - -// ManagedClusterSetBindingSpec defines the attributes of ManagedClusterSetBinding. -type ManagedClusterSetBindingSpec struct { - // ClusterSet is the name of the ManagedClusterSet to bind. It must match the - // instance name of the ManagedClusterSetBinding and cannot change once created. - // User is allowed to set this field if they have an RBAC rule to CREATE on the - // virtual subresource of managedclustersets/bind. - // +kubebuilder:validation:MinLength=1 - ClusterSet string `json:"clusterSet"` -} - -const ( - // ClusterSetBindingBoundType is a condition type of clustersetbinding representing - // whether the ClusterSetBinding is bound to a clusterset. - ClusterSetBindingBoundType = "Bound" -) - -// ManagedClusterSetBindingStatus represents the current status of the ManagedClusterSetBinding. -type ManagedClusterSetBindingStatus struct { - // Conditions contains the different condition statuses for this ManagedClusterSetBinding. - Conditions []metav1.Condition `json:"conditions"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ManagedClusterSetBindingList is a collection of ManagedClusterSetBinding. -type ManagedClusterSetBindingList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - // Items is a list of ManagedClusterSetBinding. 
- Items []ManagedClusterSetBinding `json:"items"` -} diff --git a/vendor/open-cluster-management.io/api/cluster/v1beta1/types_placement.go b/vendor/open-cluster-management.io/api/cluster/v1beta1/types_placement.go index 9f4aabd6e..9065b597b 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1beta1/types_placement.go +++ b/vendor/open-cluster-management.io/api/cluster/v1beta1/types_placement.go @@ -2,6 +2,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" v1 "open-cluster-management.io/api/cluster/v1" ) @@ -94,6 +95,60 @@ type PlacementSpec struct { // certain taints to be selected by placements with matching tolerations. // +optional Tolerations []Toleration `json:"tolerations,omitempty"` + + // DecisionStrategy divide the created placement decision to groups and define number of clusters per decision group. + // +optional + DecisionStrategy DecisionStrategy `json:"decisionStrategy,omitempty"` +} + +// DecisionGroup define a subset of clusters that will be added to placementDecisions with groupName label. +type DecisionGroup struct { + // Group name to be added as label value to the created placement Decisions labels with label key cluster.open-cluster-management.io/decision-group-name + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern="^[a-zA-Z0-9][-A-Za-z0-9_.]{0,61}[a-zA-Z0-9]$" + // +required + GroupName string `json:"groupName,omitempty"` + + // LabelSelector to select clusters subset by label. + // +kubebuilder:validation:Required + // +required + ClusterSelector ClusterSelector `json:"groupClusterSelector,omitempty"` +} + +// Group the created placementDecision into decision groups based on the number of clusters per decision group. +type GroupStrategy struct { + // DecisionGroups represents a list of predefined groups to put decision results. + // Decision groups will be constructed based on the DecisionGroups field at first. The clusters not included in the + // DecisionGroups will be divided to other decision groups afterwards. Each decision group should not have the number + // of clusters larger than the ClustersPerDecisionGroup. + // +optional + DecisionGroups []DecisionGroup `json:"decisionGroups,omitempty"` + + // ClustersPerDecisionGroup is a specific number or percentage of the total selected clusters. + // The specific number will divide the placementDecisions to decisionGroups each group has max number of clusters + // equal to that specific number. + // The percentage will divide the placementDecisions to decisionGroups each group has max number of clusters based + // on the total num of selected clusters and percentage. + // ex; for a total 100 clusters selected, ClustersPerDecisionGroup equal to 20% will divide the placement decision + // to 5 groups each group should have 20 clusters. + // Default is having all clusters in a single group. + // + // The predefined decisionGroups is expected to be a subset of the selected clusters and the number of items in each + // group SHOULD be less than ClustersPerDecisionGroup. Once the number of items exceeds the ClustersPerDecisionGroup, + // the decisionGroups will also be be divided into multiple decisionGroups with same GroupName but different GroupIndex. 
+ // + // +kubebuilder:validation:XIntOrString + // +kubebuilder:validation:Pattern=`^((100|[1-9][0-9]{0,1})%|[1-9][0-9]*)$` + // +kubebuilder:default:="100%" + // +optional + ClustersPerDecisionGroup intstr.IntOrString `json:"clustersPerDecisionGroup,omitempty"` +} + +// DecisionStrategy divide the created placement decision to groups and define number of clusters per decision group. +type DecisionStrategy struct { + // GroupStrategy define strategies to divide selected clusters to decision groups. + // +optional + GroupStrategy GroupStrategy `json:"groupStrategy,omitempty"` } // ClusterPredicate represents a predicate to select ManagedClusters. @@ -329,11 +384,35 @@ const ( TolerationOpEqual TolerationOperator = "Equal" ) +// Present decision groups status based on the DecisionStrategy definition. +type DecisionGroupStatus struct { + // Present the decision group index. If there is no decision strategy defined all placement decisions will be in group index 0 + // +optional + DecisionGroupIndex int32 `json:"decisionGroupIndex"` + + // Decision group name that is defined in the DecisionStrategy's DecisionGroup. + // +optional + DecisionGroupName string `json:"decisionGroupName"` + + // List of placement decisions names associated with the decision group + // +optional + Decisions []string `json:"decisions"` + + // Total number of clusters in the decision group. Clusters count is equal or less than the clusterPerDecisionGroups defined in the decision strategy. + // +kubebuilder:default:=0 + // +optional + ClustersCount int32 `json:"clusterCount"` +} + type PlacementStatus struct { // NumberOfSelectedClusters represents the number of selected ManagedClusters // +optional NumberOfSelectedClusters int32 `json:"numberOfSelectedClusters"` + // List of decision groups determined by the placement and DecisionStrategy. + // +optional + DecisionGroups []DecisionGroupStatus `json:"decisionGroups"` + // Conditions contains the different condition status for this Placement. // +optional Conditions []metav1.Condition `json:"conditions"` diff --git a/vendor/open-cluster-management.io/api/cluster/v1beta1/types_placementdecision.go b/vendor/open-cluster-management.io/api/cluster/v1beta1/types_placementdecision.go index e9072c8d6..b32b11e9b 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1beta1/types_placementdecision.go +++ b/vendor/open-cluster-management.io/api/cluster/v1beta1/types_placementdecision.go @@ -26,9 +26,14 @@ type PlacementDecision struct { Status PlacementDecisionStatus `json:"status,omitempty"` } -// The placementDecsion label name holding the placement name +// The placementDecsion labels const ( + // Placement owner name. PlacementLabel string = "cluster.open-cluster-management.io/placement" + // decision group index. + DecisionGroupIndexLabel string = "cluster.open-cluster-management.io/decision-group-index" + // decision group name. + DecisionGroupNameLabel string = "cluster.open-cluster-management.io/decision-group-name" ) // PlacementDecisionStatus represents the current status of the PlacementDecision. 
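To illustrate the decisionStrategy API added above, here is a minimal sketch of a Placement custom resource using it; the resource name, namespace, and label values are purely illustrative, and the field names follow the JSON tags introduced in this hunk (decisionStrategy, groupStrategy, decisionGroups, groupName, groupClusterSelector, clustersPerDecisionGroup).

apiVersion: cluster.open-cluster-management.io/v1beta1
kind: Placement
metadata:
  name: example-placement    # illustrative name
  namespace: default         # illustrative namespace
spec:
  decisionStrategy:
    groupStrategy:
      # Cap every generated decision group at 25% of the selected clusters.
      clustersPerDecisionGroup: "25%"
      decisionGroups:
      # Clusters matching this selector are grouped first, under the
      # cluster.open-cluster-management.io/decision-group-name=canary label.
      - groupName: canary
        groupClusterSelector:
          labelSelector:
            matchLabels:
              environment: canary

With such a spec, the remaining selected clusters would be split into further groups of at most 25% each, every generated PlacementDecision would carry the new decision-group-index and decision-group-name labels defined above, and the per-group summary would be reported under status.decisionGroups.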
diff --git a/vendor/open-cluster-management.io/api/cluster/v1beta1/zz_generated.deepcopy.go b/vendor/open-cluster-management.io/api/cluster/v1beta1/zz_generated.deepcopy.go index f7a6f2f68..347b89457 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1beta1/zz_generated.deepcopy.go +++ b/vendor/open-cluster-management.io/api/cluster/v1beta1/zz_generated.deepcopy.go @@ -8,6 +8,7 @@ package v1beta1 import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" + sets "k8s.io/apimachinery/pkg/util/sets" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -65,6 +66,38 @@ func (in *ClusterDecision) DeepCopy() *ClusterDecision { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ClusterGroupsMap) DeepCopyInto(out *ClusterGroupsMap) { + { + in := &in + *out = make(ClusterGroupsMap, len(*in)) + for key, val := range *in { + var outVal map[string]sets.Empty + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(sets.Set[string], len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + (*out)[key] = outVal + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterGroupsMap. +func (in ClusterGroupsMap) DeepCopy() ClusterGroupsMap { + if in == nil { + return nil + } + out := new(ClusterGroupsMap) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterPredicate) DeepCopyInto(out *ClusterPredicate) { *out = *in @@ -101,223 +134,80 @@ func (in *ClusterSelector) DeepCopy() *ClusterSelector { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedClusterSelector) DeepCopyInto(out *ManagedClusterSelector) { - *out = *in - if in.LabelSelector != nil { - in, out := &in.LabelSelector, &out.LabelSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSelector. -func (in *ManagedClusterSelector) DeepCopy() *ManagedClusterSelector { - if in == nil { - return nil - } - out := new(ManagedClusterSelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedClusterSet) DeepCopyInto(out *ManagedClusterSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSet. -func (in *ManagedClusterSet) DeepCopy() *ManagedClusterSet { - if in == nil { - return nil - } - out := new(ManagedClusterSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ManagedClusterSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ManagedClusterSetBinding) DeepCopyInto(out *ManagedClusterSetBinding) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetBinding. -func (in *ManagedClusterSetBinding) DeepCopy() *ManagedClusterSetBinding { - if in == nil { - return nil - } - out := new(ManagedClusterSetBinding) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ManagedClusterSetBinding) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedClusterSetBindingList) DeepCopyInto(out *ManagedClusterSetBindingList) { +func (in *DecisionGroup) DeepCopyInto(out *DecisionGroup) { *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ManagedClusterSetBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetBindingList. -func (in *ManagedClusterSetBindingList) DeepCopy() *ManagedClusterSetBindingList { - if in == nil { - return nil - } - out := new(ManagedClusterSetBindingList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ManagedClusterSetBindingList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedClusterSetBindingSpec) DeepCopyInto(out *ManagedClusterSetBindingSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetBindingSpec. -func (in *ManagedClusterSetBindingSpec) DeepCopy() *ManagedClusterSetBindingSpec { - if in == nil { - return nil - } - out := new(ManagedClusterSetBindingSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedClusterSetBindingStatus) DeepCopyInto(out *ManagedClusterSetBindingStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } + in.ClusterSelector.DeepCopyInto(&out.ClusterSelector) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetBindingStatus. -func (in *ManagedClusterSetBindingStatus) DeepCopy() *ManagedClusterSetBindingStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DecisionGroup. +func (in *DecisionGroup) DeepCopy() *DecisionGroup { if in == nil { return nil } - out := new(ManagedClusterSetBindingStatus) + out := new(DecisionGroup) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *ManagedClusterSetList) DeepCopyInto(out *ManagedClusterSetList) { +func (in *DecisionGroupStatus) DeepCopyInto(out *DecisionGroupStatus) { *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ManagedClusterSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.Decisions != nil { + in, out := &in.Decisions, &out.Decisions + *out = make([]string, len(*in)) + copy(*out, *in) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetList. -func (in *ManagedClusterSetList) DeepCopy() *ManagedClusterSetList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DecisionGroupStatus. +func (in *DecisionGroupStatus) DeepCopy() *DecisionGroupStatus { if in == nil { return nil } - out := new(ManagedClusterSetList) + out := new(DecisionGroupStatus) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ManagedClusterSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedClusterSetSpec) DeepCopyInto(out *ManagedClusterSetSpec) { +func (in *DecisionStrategy) DeepCopyInto(out *DecisionStrategy) { *out = *in - in.ClusterSelector.DeepCopyInto(&out.ClusterSelector) + in.GroupStrategy.DeepCopyInto(&out.GroupStrategy) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetSpec. -func (in *ManagedClusterSetSpec) DeepCopy() *ManagedClusterSetSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DecisionStrategy. +func (in *DecisionStrategy) DeepCopy() *DecisionStrategy { if in == nil { return nil } - out := new(ManagedClusterSetSpec) + out := new(DecisionStrategy) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedClusterSetStatus) DeepCopyInto(out *ManagedClusterSetStatus) { +func (in *GroupStrategy) DeepCopyInto(out *GroupStrategy) { *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) + if in.DecisionGroups != nil { + in, out := &in.DecisionGroups, &out.DecisionGroups + *out = make([]DecisionGroup, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } + out.ClustersPerDecisionGroup = in.ClustersPerDecisionGroup return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSetStatus. -func (in *ManagedClusterSetStatus) DeepCopy() *ManagedClusterSetStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupStrategy. 
+func (in *GroupStrategy) DeepCopy() *GroupStrategy { if in == nil { return nil } - out := new(ManagedClusterSetStatus) + out := new(GroupStrategy) in.DeepCopyInto(out) return out } @@ -493,6 +383,7 @@ func (in *PlacementSpec) DeepCopyInto(out *PlacementSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + in.DecisionStrategy.DeepCopyInto(&out.DecisionStrategy) return } @@ -509,6 +400,13 @@ func (in *PlacementSpec) DeepCopy() *PlacementSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PlacementStatus) DeepCopyInto(out *PlacementStatus) { *out = *in + if in.DecisionGroups != nil { + in, out := &in.DecisionGroups, &out.DecisionGroups + *out = make([]DecisionGroupStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]v1.Condition, len(*in)) diff --git a/vendor/open-cluster-management.io/api/cluster/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/open-cluster-management.io/api/cluster/v1beta1/zz_generated.swagger_doc_generated.go index b8c70231c..bc134648b 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1beta1/zz_generated.swagger_doc_generated.go +++ b/vendor/open-cluster-management.io/api/cluster/v1beta1/zz_generated.swagger_doc_generated.go @@ -11,92 +11,6 @@ package v1beta1 // Those methods can be generated by using hack/update-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE -var map_ManagedClusterSelector = map[string]string{ - "": "ManagedClusterSelector represents a selector of ManagedClusters", - "selectorType": "SelectorType could only be \"LegacyClusterSetLabel\" or \"LabelSelector\" \"LegacyClusterSetLabel\" means to use label \"cluster.open-cluster-management.io/clusterset:\"\" to select target clusters. \"LabelSelector\" means use labelSelector to select target managedClusters", - "labelSelector": "LabelSelector define the general labelSelector which clusterset will use to select target managedClusters", -} - -func (ManagedClusterSelector) SwaggerDoc() map[string]string { - return map_ManagedClusterSelector -} - -var map_ManagedClusterSet = map[string]string{ - "": "ManagedClusterSet defines a group of ManagedClusters that user's workload can run on. A workload can be defined to deployed on a ManagedClusterSet, which mean:\n 1. The workload can run on any ManagedCluster in the ManagedClusterSet\n 2. The workload cannot run on any ManagedCluster outside the ManagedClusterSet\n 3. The service exposed by the workload can be shared in any ManagedCluster in the ManagedClusterSet\n\nIn order to assign a ManagedCluster to a certian ManagedClusterSet, add a label with name `cluster.open-cluster-management.io/clusterset` on the ManagedCluster to refers to the ManagedClusterSet. User is not allow to add/remove this label on a ManagedCluster unless they have a RBAC rule to CREATE on a virtual subresource of managedclustersets/join. In order to update this label, user must have the permission on both the old and new ManagedClusterSet.", - "spec": "Spec defines the attributes of the ManagedClusterSet", - "status": "Status represents the current status of the ManagedClusterSet", -} - -func (ManagedClusterSet) SwaggerDoc() map[string]string { - return map_ManagedClusterSet -} - -var map_ManagedClusterSetList = map[string]string{ - "": "ManagedClusterSetList is a collection of ManagedClusterSet.", - "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "Items is a list of ManagedClusterSet.", -} - -func (ManagedClusterSetList) SwaggerDoc() map[string]string { - return map_ManagedClusterSetList -} - -var map_ManagedClusterSetSpec = map[string]string{ - "": "ManagedClusterSetSpec describes the attributes of the ManagedClusterSet", - "clusterSelector": "ClusterSelector represents a selector of ManagedClusters", -} - -func (ManagedClusterSetSpec) SwaggerDoc() map[string]string { - return map_ManagedClusterSetSpec -} - -var map_ManagedClusterSetStatus = map[string]string{ - "": "ManagedClusterSetStatus represents the current status of the ManagedClusterSet.", - "conditions": "Conditions contains the different condition statuses for this ManagedClusterSet.", -} - -func (ManagedClusterSetStatus) SwaggerDoc() map[string]string { - return map_ManagedClusterSetStatus -} - -var map_ManagedClusterSetBinding = map[string]string{ - "": "ManagedClusterSetBinding projects a ManagedClusterSet into a certain namespace. User is able to create a ManagedClusterSetBinding in a namespace and bind it to a ManagedClusterSet if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind. Workloads created in the same namespace can only be distributed to ManagedClusters in ManagedClusterSets bound in this namespace by higher level controllers.", - "spec": "Spec defines the attributes of ManagedClusterSetBinding.", - "status": "Status represents the current status of the ManagedClusterSetBinding", -} - -func (ManagedClusterSetBinding) SwaggerDoc() map[string]string { - return map_ManagedClusterSetBinding -} - -var map_ManagedClusterSetBindingList = map[string]string{ - "": "ManagedClusterSetBindingList is a collection of ManagedClusterSetBinding.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "Items is a list of ManagedClusterSetBinding.", -} - -func (ManagedClusterSetBindingList) SwaggerDoc() map[string]string { - return map_ManagedClusterSetBindingList -} - -var map_ManagedClusterSetBindingSpec = map[string]string{ - "": "ManagedClusterSetBindingSpec defines the attributes of ManagedClusterSetBinding.", - "clusterSet": "ClusterSet is the name of the ManagedClusterSet to bind. It must match the instance name of the ManagedClusterSetBinding and cannot change once created. User is allowed to set this field if they have an RBAC rule to CREATE on the virtual subresource of managedclustersets/bind.", -} - -func (ManagedClusterSetBindingSpec) SwaggerDoc() map[string]string { - return map_ManagedClusterSetBindingSpec -} - -var map_ManagedClusterSetBindingStatus = map[string]string{ - "": "ManagedClusterSetBindingStatus represents the current status of the ManagedClusterSetBinding.", - "conditions": "Conditions contains the different condition statuses for this ManagedClusterSetBinding.", -} - -func (ManagedClusterSetBindingStatus) SwaggerDoc() map[string]string { - return map_ManagedClusterSetBindingStatus -} - var map_AddOnScore = map[string]string{ "": "AddOnScore represents the configuration of the addon score source.", "resourceName": "ResourceName defines the resource name of the AddOnPlacementScore. 
The placement prioritizer selects AddOnPlacementScore CR by this name.", @@ -135,6 +49,47 @@ func (ClusterSelector) SwaggerDoc() map[string]string { return map_ClusterSelector } +var map_DecisionGroup = map[string]string{ + "": "DecisionGroup define a subset of clusters that will be added to placementDecisions with groupName label.", + "groupName": "Group name to be added as label value to the created placement Decisions labels with label key cluster.open-cluster-management.io/decision-group-name", + "groupClusterSelector": "LabelSelector to select clusters subset by label.", +} + +func (DecisionGroup) SwaggerDoc() map[string]string { + return map_DecisionGroup +} + +var map_DecisionGroupStatus = map[string]string{ + "": "Present decision groups status based on the DecisionStrategy definition.", + "decisionGroupIndex": "Present the decision group index. If there is no decision strategy defined all placement decisions will be in group index 0", + "decisionGroupName": "Decision group name that is defined in the DecisionStrategy's DecisionGroup.", + "decisions": "List of placement decisions names associated with the decision group", + "clusterCount": "Total number of clusters in the decision group. Clusters count is equal or less than the clusterPerDecisionGroups defined in the decision strategy.", +} + +func (DecisionGroupStatus) SwaggerDoc() map[string]string { + return map_DecisionGroupStatus +} + +var map_DecisionStrategy = map[string]string{ + "": "DecisionStrategy divide the created placement decision to groups and define number of clusters per decision group.", + "groupStrategy": "GroupStrategy define strategies to divide selected clusters to decision groups.", +} + +func (DecisionStrategy) SwaggerDoc() map[string]string { + return map_DecisionStrategy +} + +var map_GroupStrategy = map[string]string{ + "": "Group the created placementDecision into decision groups based on the number of clusters per decision group.", + "decisionGroups": "DecisionGroups represents a list of predefined groups to put decision results. Decision groups will be constructed based on the DecisionGroups field at first. The clusters not included in the DecisionGroups will be divided to other decision groups afterwards. Each decision group should not have the number of clusters larger than the ClustersPerDecisionGroup.", + "clustersPerDecisionGroup": "ClustersPerDecisionGroup is a specific number or percentage of the total selected clusters. The specific number will divide the placementDecisions to decisionGroups each group has max number of clusters equal to that specific number. The percentage will divide the placementDecisions to decisionGroups each group has max number of clusters based on the total num of selected clusters and percentage. ex; for a total 100 clusters selected, ClustersPerDecisionGroup equal to 20% will divide the placement decision to 5 groups each group should have 20 clusters. Default is having all clusters in a single group.\n\nThe predefined decisionGroups is expected to be a subset of the selected clusters and the number of items in each group SHOULD be less than ClustersPerDecisionGroup. 
Once the number of items exceeds the ClustersPerDecisionGroup, the decisionGroups will also be be divided into multiple decisionGroups with same GroupName but different GroupIndex.", +} + +func (GroupStrategy) SwaggerDoc() map[string]string { + return map_GroupStrategy +} + var map_Placement = map[string]string{ "": "Placement defines a rule to select a set of ManagedClusters from the ManagedClusterSets bound to the placement namespace.\n\nHere is how the placement policy combines with other selection methods to determine a matching list of ManagedClusters:\n 1. Kubernetes clusters are registered with hub as cluster-scoped ManagedClusters;\n 2. ManagedClusters are organized into cluster-scoped ManagedClusterSets;\n 3. ManagedClusterSets are bound to workload namespaces;\n 4. Namespace-scoped Placements specify a slice of ManagedClusterSets which select a working set\n of potential ManagedClusters;\n 5. Then Placements subselect from that working set using label/claim selection.\n\nNo ManagedCluster will be selected if no ManagedClusterSet is bound to the placement namespace. User is able to bind a ManagedClusterSet to a namespace by creating a ManagedClusterSetBinding in that namespace if they have a RBAC rule to CREATE on the virtual subresource of `managedclustersets/bind`.\n\nA slice of PlacementDecisions with label cluster.open-cluster-management.io/placement={placement name} will be created to represent the ManagedClusters selected by this placement.\n\nIf a ManagedCluster is selected and added into the PlacementDecisions, other components may apply workload on it; once it is removed from the PlacementDecisions, the workload applied on this ManagedCluster should be evicted accordingly.", "spec": "Spec defines the attributes of Placement.", @@ -163,6 +118,7 @@ var map_PlacementSpec = map[string]string{ "prioritizerPolicy": "PrioritizerPolicy defines the policy of the prioritizers. If this field is unset, then default prioritizer mode and configurations are used. 
Referring to PrioritizerPolicy to see more description about Mode and Configurations.", "spreadPolicy": "SpreadPolicy defines how placement decisions should be distributed among a set of ManagedClusters.", "tolerations": "Tolerations are applied to placements, and allow (but do not require) the managed clusters with certain taints to be selected by placements with matching tolerations.", + "decisionStrategy": "DecisionStrategy divide the created placement decision to groups and define number of clusters per decision group.", } func (PlacementSpec) SwaggerDoc() map[string]string { @@ -171,6 +127,7 @@ func (PlacementSpec) SwaggerDoc() map[string]string { var map_PlacementStatus = map[string]string{ "numberOfSelectedClusters": "NumberOfSelectedClusters represents the number of selected ManagedClusters", + "decisionGroups": "List of decision groups determined by the placement and DecisionStrategy.", "conditions": "Conditions contains the different condition status for this Placement.", } diff --git a/vendor/open-cluster-management.io/api/cluster/v1beta2/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml b/vendor/open-cluster-management.io/api/cluster/v1beta2/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml index c0d11bb93..35109c2e5 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1beta2/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml +++ b/vendor/open-cluster-management.io/api/cluster/v1beta2/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml @@ -22,10 +22,7 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date - deprecated: true - deprecationWarning: cluster.open-cluster-management.io/v1beta1 ManagedClusterSet - is deprecated; use cluster.open-cluster-management.io/v1beta2 ManagedClusterSet - name: v1beta1 + name: v1beta2 schema: openAPIV3Schema: description: "ManagedClusterSet defines a group of ManagedClusters that user's @@ -56,12 +53,12 @@ spec: spec: default: clusterSelector: - selectorType: LegacyClusterSetLabel + selectorType: ExclusiveClusterSetLabel description: Spec defines the attributes of the ManagedClusterSet properties: clusterSelector: default: - selectorType: LegacyClusterSetLabel + selectorType: ExclusiveClusterSetLabel description: ClusterSelector represents a selector of ManagedClusters properties: labelSelector: @@ -111,14 +108,14 @@ spec: type: object x-kubernetes-map-type: atomic selectorType: - default: LegacyClusterSetLabel - description: SelectorType could only be "LegacyClusterSetLabel" - or "LabelSelector" "LegacyClusterSetLabel" means to use label + default: ExclusiveClusterSetLabel + description: SelectorType could only be "ExclusiveClusterSetLabel" + or "LabelSelector" "ExclusiveClusterSetLabel" means to use label "cluster.open-cluster-management.io/clusterset:"" to select target clusters. "LabelSelector" means use labelSelector to select target managedClusters enum: - - LegacyClusterSetLabel + - ExclusiveClusterSetLabel - LabelSelector type: string type: object @@ -199,178 +196,6 @@ spec: type: object type: object served: true - storage: false - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=="ClusterSetEmpty")].status - name: Empty - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta2 - schema: - openAPIV3Schema: - description: "ManagedClusterSet defines a group of ManagedClusters that user's - workload can run on. 
A workload can be defined to deployed on a ManagedClusterSet, - which mean: 1. The workload can run on any ManagedCluster in the ManagedClusterSet - 2. The workload cannot run on any ManagedCluster outside the ManagedClusterSet - 3. The service exposed by the workload can be shared in any ManagedCluster - in the ManagedClusterSet \n In order to assign a ManagedCluster to a certian - ManagedClusterSet, add a label with name `cluster.open-cluster-management.io/clusterset` - on the ManagedCluster to refers to the ManagedClusterSet. User is not allow - to add/remove this label on a ManagedCluster unless they have a RBAC rule - to CREATE on a virtual subresource of managedclustersets/join. In order - to update this label, user must have the permission on both the old and - new ManagedClusterSet." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - default: - clusterSelector: - selectorType: ExclusiveClusterSetLabel - description: Spec defines the attributes of the ManagedClusterSet - properties: - clusterSelector: - default: - selectorType: ExclusiveClusterSetLabel - description: ClusterSelector represents a selector of ManagedClusters - properties: - labelSelector: - description: LabelSelector define the general labelSelector which - clusterset will use to select target managedClusters - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If - the operator is In or NotIn, the values array must - be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced - during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A - single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is "key", - the operator is "In", and the values array contains only - "value". The requirements are ANDed. - type: object - type: object - selectorType: - default: ExclusiveClusterSetLabel - description: SelectorType could only be "ExclusiveClusterSetLabel" - or "LabelSelector" "ExclusiveClusterSetLabel" means to use label - "cluster.open-cluster-management.io/clusterset:"" to select target clusters. 
"LabelSelector" means use - labelSelector to select target managedClusters - enum: - - ExclusiveClusterSetLabel - - LabelSelector - type: string - type: object - type: object - status: - description: Status represents the current status of the ManagedClusterSet - properties: - conditions: - description: Conditions contains the different condition statuses - for this ManagedClusterSet. - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - type: string - status: - description: status of the condition, one of True, False, Unknown. - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - type: object - type: object - served: true storage: true subresources: status: {} diff --git a/vendor/open-cluster-management.io/api/cluster/v1beta2/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml b/vendor/open-cluster-management.io/api/cluster/v1beta2/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml index e07e082e2..cfba3ffa1 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1beta2/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml +++ b/vendor/open-cluster-management.io/api/cluster/v1beta2/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml @@ -15,10 +15,7 @@ spec: preserveUnknownFields: false scope: Namespaced versions: - - deprecated: true - deprecationWarning: cluster.open-cluster-management.io/v1beta1 ManagedClusterSetBinding - is deprecated; use cluster.open-cluster-management.io/v1beta2 ManagedClusterSetBinding - name: v1beta1 + - name: v1beta2 schema: openAPIV3Schema: description: ManagedClusterSetBinding projects a ManagedClusterSet into a @@ -128,108 +125,6 @@ spec: type: object type: object served: true - storage: false - subresources: - status: {} - - name: v1beta2 - schema: - openAPIV3Schema: - description: ManagedClusterSetBinding projects a ManagedClusterSet into a - certain namespace. User is able to create a ManagedClusterSetBinding in - a namespace and bind it to a ManagedClusterSet if they have an RBAC rule - to CREATE on the virtual subresource of managedclustersets/bind. Workloads - created in the same namespace can only be distributed to ManagedClusters - in ManagedClusterSets bound in this namespace by higher level controllers. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec defines the attributes of ManagedClusterSetBinding. - properties: - clusterSet: - description: ClusterSet is the name of the ManagedClusterSet to bind. - It must match the instance name of the ManagedClusterSetBinding - and cannot change once created. User is allowed to set this field - if they have an RBAC rule to CREATE on the virtual subresource of - managedclustersets/bind. - minLength: 1 - type: string - type: object - status: - description: Status represents the current status of the ManagedClusterSetBinding - properties: - conditions: - description: Conditions contains the different condition statuses - for this ManagedClusterSetBinding. - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. 
For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - type: string - status: - description: status of the condition, one of True, False, Unknown. - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - type: object - type: object - served: true storage: true subresources: status: {} diff --git a/vendor/open-cluster-management.io/api/feature/feature.go b/vendor/open-cluster-management.io/api/feature/feature.go index 806ec7e5f..ebec0e733 100644 --- a/vendor/open-cluster-management.io/api/feature/feature.go +++ b/vendor/open-cluster-management.io/api/feature/feature.go @@ -74,7 +74,7 @@ const ( // add it here. var DefaultSpokeRegistrationFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ ClusterClaim: {Default: true, PreRelease: featuregate.Beta}, - AddonManagement: {Default: false, PreRelease: featuregate.Alpha}, + AddonManagement: {Default: true, PreRelease: featuregate.Alpha}, V1beta1CSRAPICompatibility: {Default: false, PreRelease: featuregate.Alpha}, } @@ -88,7 +88,7 @@ var DefaultHubRegistrationFeatureGates = map[featuregate.Feature]featuregate.Fea } var DefaultHubAddonManagerFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ - AddonManagement: {Default: false, PreRelease: featuregate.Alpha}, + AddonManagement: {Default: true, PreRelease: featuregate.Alpha}, } // DefaultHubWorkFeatureGates consists of all known acm work wehbook feature keys. 
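With AddonManagement now enabled by default for spoke registration and the hub addon manager, an administrator who wants the previous behaviour has to switch the gate off explicitly. Below is a minimal sketch on the Klusterlet resource (whose CRD is updated in the next hunk), assuming the operator API's FeatureGate entries serialize as feature/mode, with the Enable/Disable modes defined later in this patch; the resource name is illustrative.

apiVersion: operator.open-cluster-management.io/v1
kind: Klusterlet
metadata:
  name: klusterlet    # illustrative name
spec:
  registrationConfiguration:
    featureGates:
    # Opt this managed cluster out of the new default.
    - feature: AddonManagement
      mode: Disable

Leaving featureGates empty keeps the defaults from DefaultSpokeRegistrationFeatureGates, i.e. AddonManagement stays on.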
diff --git a/vendor/open-cluster-management.io/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml b/vendor/open-cluster-management.io/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml index a64a112f8..2673c9236 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml +++ b/vendor/open-cluster-management.io/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml @@ -49,20 +49,22 @@ spec: description: DeployOption contains the options of deploying a klusterlet properties: mode: - description: 'Mode can be Default or Hosted. It is Default mode - if not specified In Default mode, all klusterlet related resources - are deployed on the managed cluster. In Hosted mode, only crd - and configurations are installed on the spoke/managed cluster. - Controllers run in another cluster (defined as management-cluster) + description: 'Mode can be Default, Hosted, Singleton or SingletonHosted. + It is Default mode if not specified In Default mode, all klusterlet + related resources are deployed on the managed cluster. In Hosted + mode, only crd and configurations are installed on the spoke/managed + cluster. Controllers run in another cluster (defined as management-cluster) and connect to the mangaged cluster with the kubeconfig in secret of "external-managed-kubeconfig"(a kubeconfig of managed-cluster - with cluster-admin permission). Note: Do not modify the Mode - field once it''s applied.' + with cluster-admin permission). In Singleton mode, registration/work + agent is started as a single deployment. In SingletonHosted + mode, agent is started as a single deployment in hosted mode. + Note: Do not modify the Mode field once it''s applied.' type: string type: object externalServerURLs: - description: ExternalServerURLs represents the a list of apiserver - urls and ca bundles that is accessible externally If it is set empty, + description: ExternalServerURLs represents a list of apiserver urls + and ca bundles that is accessible externally If it is set empty, managed cluster has no externally accessible url that hub cluster can visit. items: @@ -98,6 +100,11 @@ spec: - hostname - ip type: object + imagePullSpec: + description: ImagePullSpec represents the desired image configuration + of agent, it takes effect only when singleton mode is set. quay.io/open-cluster-management.io/registration-operator:latest + will be used if unspecified + type: string namespace: description: Namespace is the namespace to deploy the agent on the managed cluster. The namespace must have a prefix of "open-cluster-management-", @@ -122,8 +129,8 @@ spec: on. The default is an empty list. type: object tolerations: - description: Tolerations is attached by pods to tolerate any taint - that matches the triple using the matching + description: Tolerations are attached by pods to tolerate any + taint that matches the triple using the matching operator . The default is an empty list. items: description: The pod this Toleration is attached to tolerates @@ -178,6 +185,13 @@ spec: set. format: int32 type: integer + clusterAnnotations: + additionalProperties: + type: string + description: ClusterAnnotations is annotations with the reserve + prefix "agent.open-cluster-management.io" set on ManagedCluster + when creating only, other actors can update it afterwards. 
+ type: object featureGates: description: 'FeatureGates represents the list of feature gates for registration If it is set empty, default feature gates will diff --git a/vendor/open-cluster-management.io/api/operator/v1/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml b/vendor/open-cluster-management.io/api/operator/v1/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml index eccbcbc70..b57e95f5f 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml +++ b/vendor/open-cluster-management.io/api/operator/v1/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml @@ -88,7 +88,7 @@ spec: Default mode is used if DeployOption is not set. properties: hosted: - description: Hosted includes configurations we needs for clustermanager + description: Hosted includes configurations we need for clustermanager in the Hosted mode. properties: registrationWebhookConfiguration: @@ -160,8 +160,8 @@ spec: on. The default is an empty list. type: object tolerations: - description: Tolerations is attached by pods to tolerate any taint - that matches the triple using the matching + description: Tolerations are attached by pods to tolerate any + taint that matches the triple using the matching operator . The default is an empty list. items: description: The pod this Toleration is attached to tolerates diff --git a/vendor/open-cluster-management.io/api/operator/v1/types.go b/vendor/open-cluster-management.io/api/operator/v1/types_clustermanager.go similarity index 59% rename from vendor/open-cluster-management.io/api/operator/v1/types.go rename to vendor/open-cluster-management.io/api/operator/v1/types_clustermanager.go index b295a04b2..e833fa7bf 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/types.go +++ b/vendor/open-cluster-management.io/api/operator/v1/types_clustermanager.go @@ -72,6 +72,19 @@ type ClusterManagerSpec struct { AddOnManagerConfiguration *AddOnManagerConfiguration `json:"addOnManagerConfiguration,omitempty"` } +// NodePlacement describes node scheduling configuration for the pods. +type NodePlacement struct { + // NodeSelector defines which Nodes the Pods are scheduled on. The default is an empty list. + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // Tolerations are attached by pods to tolerate any taint that matches + // the triple using the matching operator . + // The default is an empty list. + // +optional + Tolerations []v1.Toleration `json:"tolerations,omitempty"` +} + type RegistrationHubConfiguration struct { // AutoApproveUser represents a list of users that can auto approve CSR and accept client. If the credential of the // bootstrap-hub-kubeconfig matches to the users, the cluster created by the bootstrap-hub-kubeconfig will @@ -91,24 +104,6 @@ type RegistrationHubConfiguration struct { FeatureGates []FeatureGate `json:"featureGates,omitempty"` } -type RegistrationConfiguration struct { - // clientCertExpirationSeconds represents the seconds of a client certificate to expire. If it is not set or 0, the default - // duration seconds will be set by the hub cluster. If the value is larger than the max signing duration seconds set on - // the hub cluster, the max signing duration seconds will be set. 
- // +optional - ClientCertExpirationSeconds int32 `json:"clientCertExpirationSeconds,omitempty"` - - // FeatureGates represents the list of feature gates for registration - // If it is set empty, default feature gates will be used. - // If it is set, featuregate/Foo is an example of one item in FeatureGates: - // 1. If featuregate/Foo does not exist, registration-operator will discard it - // 2. If featuregate/Foo exists and is false by default. It is now possible to set featuregate/Foo=[false|true] - // 3. If featuregate/Foo exists and is true by default. If a cluster-admin upgrading from 1 to 2 wants to continue having featuregate/Foo=false, - // he can set featuregate/Foo=false before upgrading. Let's say the cluster-admin wants featuregate/Foo=false. - // +optional - FeatureGates []FeatureGate `json:"featureGates,omitempty"` -} - type WorkConfiguration struct { // FeatureGates represents the list of feature gates for work // If it is set empty, default feature gates will be used. @@ -151,8 +146,9 @@ type FeatureGate struct { type FeatureGateModeType string const ( - // Valid FeatureGateModeType value is Enable, Disable. - FeatureGateModeTypeEnable FeatureGateModeType = "Enable" + // FeatureGateModeTypeEnable is the feature gate type to enable a feature. + FeatureGateModeTypeEnable FeatureGateModeType = "Enable" + // FeatureGateModeTypeDisable is the feature gate type to disable a feature. FeatureGateModeTypeDisable FeatureGateModeType = "Disable" ) @@ -185,19 +181,7 @@ type WebhookConfiguration struct { Port int32 `json:"port,omitempty"` } -// KlusterletDeployOption describes the deploy options for klusterlet -type KlusterletDeployOption struct { - // Mode can be Default or Hosted. It is Default mode if not specified - // In Default mode, all klusterlet related resources are deployed on the managed cluster. - // In Hosted mode, only crd and configurations are installed on the spoke/managed cluster. Controllers run in another - // cluster (defined as management-cluster) and connect to the mangaged cluster with the kubeconfig in secret of - // "external-managed-kubeconfig"(a kubeconfig of managed-cluster with cluster-admin permission). - // Note: Do not modify the Mode field once it's applied. - // +optional - Mode InstallMode `json:"mode"` -} - -// ClusterManagerDeployOption describes the deploy options for cluster-manager +// ClusterManagerDeployOption describes the deployment options for cluster-manager type ClusterManagerDeployOption struct { // Mode can be Default or Hosted. // In Default mode, the Hub is installed as a whole and all parts of Hub are deployed in the same cluster. @@ -212,7 +196,7 @@ type ClusterManagerDeployOption struct { // +kubebuilder:validation:Enum=Default;Hosted Mode InstallMode `json:"mode,omitempty"` - // Hosted includes configurations we needs for clustermanager in the Hosted mode. + // Hosted includes configurations we need for clustermanager in the Hosted mode. // +optional Hosted *HostedClusterManagerConfiguration `json:"hosted,omitempty"` } @@ -226,8 +210,14 @@ const ( InstallModeDefault InstallMode = "Default" // InstallModeHosted means deploying components outside. - // The cluster-manager will be deployed outside of the hub-cluster, the klusterlet will be deployed outside of the managed-cluster. + // The cluster-manager will be deployed outside the hub-cluster, the klusterlet will be deployed outside the managed-cluster. InstallModeHosted InstallMode = "Hosted" + + // InstallModeSingleton means deploying components as a single controller. 
+ InstallModeSingleton InstallMode = "Singleton" + + // InstallModeSingleton means deploying components as a single controller in hosted mode. + InstallModeSingletonHosted InstallMode = "SingletonHosted" ) // ClusterManagerStatus represents the current status of the registration and work distribution controllers running on the hub. @@ -318,162 +308,3 @@ type ClusterManagerList struct { // Items is a list of deployment configurations for registration and work distribution controllers. Items []ClusterManager `json:"items"` } - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:subresource:status -// +kubebuilder:resource:scope=Cluster - -// Klusterlet represents controllers to install the resources for a managed cluster. -// When configured, the Klusterlet requires a secret named bootstrap-hub-kubeconfig in the -// agent namespace to allow API requests to the hub for the registration protocol. -// In Hosted mode, the Klusterlet requires an additional secret named external-managed-kubeconfig -// in the agent namespace to allow API requests to the managed cluster for resources installation. -type Klusterlet struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec represents the desired deployment configuration of Klusterlet agent. - Spec KlusterletSpec `json:"spec,omitempty"` - - // Status represents the current status of Klusterlet agent. - Status KlusterletStatus `json:"status,omitempty"` -} - -// KlusterletSpec represents the desired deployment configuration of Klusterlet agent. -type KlusterletSpec struct { - // Namespace is the namespace to deploy the agent on the managed cluster. - // The namespace must have a prefix of "open-cluster-management-", and if it is not set, - // the namespace of "open-cluster-management-agent" is used to deploy agent. - // In addition, the add-ons are deployed to the namespace of "{Namespace}-addon". - // In the Hosted mode, this namespace still exists on the managed cluster to contain - // necessary resources, like service accounts, roles and rolebindings, while the agent - // is deployed to the namespace with the same name as klusterlet on the management cluster. - // +optional - // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Pattern=^open-cluster-management-[-a-z0-9]*[a-z0-9]$ - Namespace string `json:"namespace,omitempty"` - - // RegistrationImagePullSpec represents the desired image configuration of registration agent. - // quay.io/open-cluster-management.io/registration:latest will be used if unspecified. - // +optional - RegistrationImagePullSpec string `json:"registrationImagePullSpec,omitempty"` - - // WorkImagePullSpec represents the desired image configuration of work agent. - // quay.io/open-cluster-management.io/work:latest will be used if unspecified. - // +optional - WorkImagePullSpec string `json:"workImagePullSpec,omitempty"` - - // ClusterName is the name of the managed cluster to be created on hub. - // The Klusterlet agent generates a random name if it is not set, or discovers the appropriate cluster name on OpenShift. 
- // +optional - // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - ClusterName string `json:"clusterName,omitempty"` - - // ExternalServerURLs represents the a list of apiserver urls and ca bundles that is accessible externally - // If it is set empty, managed cluster has no externally accessible url that hub cluster can visit. - // +optional - ExternalServerURLs []ServerURL `json:"externalServerURLs,omitempty"` - - // NodePlacement enables explicit control over the scheduling of the deployed pods. - // +optional - NodePlacement NodePlacement `json:"nodePlacement,omitempty"` - - // DeployOption contains the options of deploying a klusterlet - // +optional - DeployOption KlusterletDeployOption `json:"deployOption,omitempty"` - - // RegistrationConfiguration contains the configuration of registration - // +optional - RegistrationConfiguration *RegistrationConfiguration `json:"registrationConfiguration,omitempty"` - - // WorkConfiguration contains the configuration of work - // +optional - WorkConfiguration *WorkConfiguration `json:"workConfiguration,omitempty"` - - // HubApiServerHostAlias contains the host alias for hub api server. - // registration-agent and work-agent will use it to communicate with hub api server. - // +optional - HubApiServerHostAlias *HubApiServerHostAlias `json:"hubApiServerHostAlias,omitempty"` -} - -// ServerURL represents the apiserver url and ca bundle that is accessible externally -type ServerURL struct { - // URL is the url of apiserver endpoint of the managed cluster. - // +required - URL string `json:"url"` - - // CABundle is the ca bundle to connect to apiserver of the managed cluster. - // System certs are used if it is not set. - // +optional - CABundle []byte `json:"caBundle,omitempty"` -} - -// NodePlacement describes node scheduling configuration for the pods. -type NodePlacement struct { - // NodeSelector defines which Nodes the Pods are scheduled on. The default is an empty list. - // +optional - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - - // Tolerations is attached by pods to tolerate any taint that matches - // the triple using the matching operator . - // The default is an empty list. - // +optional - Tolerations []v1.Toleration `json:"tolerations,omitempty"` -} - -// HubApiServerHostAlias holds the mapping between IP and hostname that will be injected as an entry in the -// pod's hosts file. -type HubApiServerHostAlias struct { - // IP address of the host file entry. - // +required - // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern=`^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$` - IP string `json:"ip"` - - // Hostname for the above IP address. - // +required - // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern=`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$` - Hostname string `json:"hostname"` -} - -// KlusterletStatus represents the current status of Klusterlet agent. -type KlusterletStatus struct { - // ObservedGeneration is the last generation change you've dealt with - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty"` - - // Conditions contain the different condition statuses for this Klusterlet. - // Valid condition types are: - // Applied: Components have been applied in the managed cluster. - // Available: Components in the managed cluster are available and ready to serve. 
- // Progressing: Components in the managed cluster are in a transitioning state. - // Degraded: Components in the managed cluster do not match the desired configuration and only provide - // degraded service. - Conditions []metav1.Condition `json:"conditions"` - - // Generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. - // +optional - Generations []GenerationStatus `json:"generations,omitempty"` - - // RelatedResources are used to track the resources that are related to this Klusterlet. - // +optional - RelatedResources []RelatedResourceMeta `json:"relatedResources,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// KlusterletList is a collection of Klusterlet agents. -type KlusterletList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - // Items is a list of Klusterlet agents. - Items []Klusterlet `json:"items"` -} diff --git a/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go b/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go new file mode 100644 index 000000000..dd37fba8c --- /dev/null +++ b/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go @@ -0,0 +1,197 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster + +// Klusterlet represents controllers to install the resources for a managed cluster. +// When configured, the Klusterlet requires a secret named bootstrap-hub-kubeconfig in the +// agent namespace to allow API requests to the hub for the registration protocol. +// In Hosted mode, the Klusterlet requires an additional secret named external-managed-kubeconfig +// in the agent namespace to allow API requests to the managed cluster for resources installation. +type Klusterlet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec represents the desired deployment configuration of Klusterlet agent. + Spec KlusterletSpec `json:"spec,omitempty"` + + // Status represents the current status of Klusterlet agent. + Status KlusterletStatus `json:"status,omitempty"` +} + +// KlusterletSpec represents the desired deployment configuration of Klusterlet agent. +type KlusterletSpec struct { + // Namespace is the namespace to deploy the agent on the managed cluster. + // The namespace must have a prefix of "open-cluster-management-", and if it is not set, + // the namespace of "open-cluster-management-agent" is used to deploy agent. + // In addition, the add-ons are deployed to the namespace of "{Namespace}-addon". + // In the Hosted mode, this namespace still exists on the managed cluster to contain + // necessary resources, like service accounts, roles and rolebindings, while the agent + // is deployed to the namespace with the same name as klusterlet on the management cluster. + // +optional + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Pattern=^open-cluster-management-[-a-z0-9]*[a-z0-9]$ + Namespace string `json:"namespace,omitempty"` + + // RegistrationImagePullSpec represents the desired image configuration of registration agent. 
+ // quay.io/open-cluster-management.io/registration:latest will be used if unspecified. + // +optional + RegistrationImagePullSpec string `json:"registrationImagePullSpec,omitempty"` + + // WorkImagePullSpec represents the desired image configuration of work agent. + // quay.io/open-cluster-management.io/work:latest will be used if unspecified. + // +optional + WorkImagePullSpec string `json:"workImagePullSpec,omitempty"` + + // ImagePullSpec represents the desired image configuration of agent, it takes effect only when + // singleton mode is set. quay.io/open-cluster-management.io/registration-operator:latest will + // be used if unspecified + // +optional + ImagePullSpec string `json:"imagePullSpec,omitempty"` + + // ClusterName is the name of the managed cluster to be created on hub. + // The Klusterlet agent generates a random name if it is not set, or discovers the appropriate cluster name on OpenShift. + // +optional + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + ClusterName string `json:"clusterName,omitempty"` + + // ExternalServerURLs represents a list of apiserver urls and ca bundles that is accessible externally + // If it is set empty, managed cluster has no externally accessible url that hub cluster can visit. + // +optional + ExternalServerURLs []ServerURL `json:"externalServerURLs,omitempty"` + + // NodePlacement enables explicit control over the scheduling of the deployed pods. + // +optional + NodePlacement NodePlacement `json:"nodePlacement,omitempty"` + + // DeployOption contains the options of deploying a klusterlet + // +optional + DeployOption KlusterletDeployOption `json:"deployOption,omitempty"` + + // RegistrationConfiguration contains the configuration of registration + // +optional + RegistrationConfiguration *RegistrationConfiguration `json:"registrationConfiguration,omitempty"` + + // WorkConfiguration contains the configuration of work + // +optional + WorkConfiguration *WorkConfiguration `json:"workConfiguration,omitempty"` + + // HubApiServerHostAlias contains the host alias for hub api server. + // registration-agent and work-agent will use it to communicate with hub api server. + // +optional + HubApiServerHostAlias *HubApiServerHostAlias `json:"hubApiServerHostAlias,omitempty"` +} + +// ServerURL represents the apiserver url and ca bundle that is accessible externally +type ServerURL struct { + // URL is the url of apiserver endpoint of the managed cluster. + // +required + URL string `json:"url"` + + // CABundle is the ca bundle to connect to apiserver of the managed cluster. + // System certs are used if it is not set. + // +optional + CABundle []byte `json:"caBundle,omitempty"` +} + +// HubApiServerHostAlias holds the mapping between IP and hostname that will be injected as an entry in the +// pod's hosts file. +type HubApiServerHostAlias struct { + // IP address of the host file entry. + // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$` + IP string `json:"ip"` + + // Hostname for the above IP address. 
+ // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$` + Hostname string `json:"hostname"` +} + +type RegistrationConfiguration struct { + // clientCertExpirationSeconds represents the seconds of a client certificate to expire. If it is not set or 0, the default + // duration seconds will be set by the hub cluster. If the value is larger than the max signing duration seconds set on + // the hub cluster, the max signing duration seconds will be set. + // +optional + ClientCertExpirationSeconds int32 `json:"clientCertExpirationSeconds,omitempty"` + + // FeatureGates represents the list of feature gates for registration + // If it is set empty, default feature gates will be used. + // If it is set, featuregate/Foo is an example of one item in FeatureGates: + // 1. If featuregate/Foo does not exist, registration-operator will discard it + // 2. If featuregate/Foo exists and is false by default. It is now possible to set featuregate/Foo=[false|true] + // 3. If featuregate/Foo exists and is true by default. If a cluster-admin upgrading from 1 to 2 wants to continue having featuregate/Foo=false, + // he can set featuregate/Foo=false before upgrading. Let's say the cluster-admin wants featuregate/Foo=false. + // +optional + FeatureGates []FeatureGate `json:"featureGates,omitempty"` + + // ClusterAnnotations is annotations with the reserved prefix "agent.open-cluster-management.io" set on + // ManagedCluster when creating only, other actors can update it afterwards. + // +optional + ClusterAnnotations map[string]string `json:"clusterAnnotations,omitempty"` +} + +const ( + // ClusterAnnotationsKeyPrefix is the prefix of annotations set on ManagedCluster when creating only. + ClusterAnnotationsKeyPrefix = "agent.open-cluster-management.io" +) + +// KlusterletDeployOption describes the deployment options for klusterlet +type KlusterletDeployOption struct { + // Mode can be Default, Hosted, Singleton or SingletonHosted. It is Default mode if not specified + // In Default mode, all klusterlet related resources are deployed on the managed cluster. + // In Hosted mode, only crd and configurations are installed on the spoke/managed cluster. Controllers run in another + // cluster (defined as management-cluster) and connect to the managed cluster with the kubeconfig in secret of + // "external-managed-kubeconfig"(a kubeconfig of managed-cluster with cluster-admin permission). + // In Singleton mode, registration/work agent is started as a single deployment. + // In SingletonHosted mode, agent is started as a single deployment in hosted mode. + // Note: Do not modify the Mode field once it's applied. + // +optional + Mode InstallMode `json:"mode"` +} + +// KlusterletStatus represents the current status of Klusterlet agent. +type KlusterletStatus struct { + // ObservedGeneration is the last generation change you've dealt with + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions contain the different condition statuses for this Klusterlet. + // Valid condition types are: + // Applied: Components have been applied in the managed cluster. + // Available: Components in the managed cluster are available and ready to serve. + // Progressing: Components in the managed cluster are in a transitioning state.
+ // Degraded: Components in the managed cluster do not match the desired configuration and only provide + // degraded service. + Conditions []metav1.Condition `json:"conditions"` + + // Generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + // +optional + Generations []GenerationStatus `json:"generations,omitempty"` + + // RelatedResources are used to track the resources that are related to this Klusterlet. + // +optional + RelatedResources []RelatedResourceMeta `json:"relatedResources,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KlusterletList is a collection of Klusterlet agents. +type KlusterletList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is a list of Klusterlet agents. + Items []Klusterlet `json:"items"` +} diff --git a/vendor/open-cluster-management.io/api/operator/v1/zz_generated.deepcopy.go b/vendor/open-cluster-management.io/api/operator/v1/zz_generated.deepcopy.go index b586d8095..40cd4080e 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/open-cluster-management.io/api/operator/v1/zz_generated.deepcopy.go @@ -434,6 +434,13 @@ func (in *RegistrationConfiguration) DeepCopyInto(out *RegistrationConfiguration *out = make([]FeatureGate, len(*in)) copy(*out, *in) } + if in.ClusterAnnotations != nil { + in, out := &in.ClusterAnnotations, &out.ClusterAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } diff --git a/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go index 13d8326f9..fefa0333c 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -30,9 +30,9 @@ func (ClusterManager) SwaggerDoc() map[string]string { } var map_ClusterManagerDeployOption = map[string]string{ - "": "ClusterManagerDeployOption describes the deploy options for cluster-manager", + "": "ClusterManagerDeployOption describes the deployment options for cluster-manager", "mode": "Mode can be Default or Hosted. In Default mode, the Hub is installed as a whole and all parts of Hub are deployed in the same cluster. In Hosted mode, only crd and configurations are installed on one cluster(defined as hub-cluster). Controllers run in another cluster (defined as management-cluster) and connect to the hub with the kubeconfig in secret of \"external-hub-kubeconfig\"(a kubeconfig of hub-cluster with cluster-admin permission). 
Note: Do not modify the Mode field once it's applied.", - "hosted": "Hosted includes configurations we needs for clustermanager in the Hosted mode.", + "hosted": "Hosted includes configurations we need for clustermanager in the Hosted mode.", } func (ClusterManagerDeployOption) SwaggerDoc() map[string]string { @@ -111,6 +111,56 @@ func (HostedClusterManagerConfiguration) SwaggerDoc() map[string]string { return map_HostedClusterManagerConfiguration } +var map_NodePlacement = map[string]string{ + "": "NodePlacement describes node scheduling configuration for the pods.", + "nodeSelector": "NodeSelector defines which Nodes the Pods are scheduled on. The default is an empty list.", + "tolerations": "Tolerations are attached by pods to tolerate any taint that matches the triple using the matching operator . The default is an empty list.", +} + +func (NodePlacement) SwaggerDoc() map[string]string { + return map_NodePlacement +} + +var map_RegistrationHubConfiguration = map[string]string{ + "autoApproveUsers": "AutoApproveUser represents a list of users that can auto approve CSR and accept client. If the credential of the bootstrap-hub-kubeconfig matches to the users, the cluster created by the bootstrap-hub-kubeconfig will be auto-registered into the hub cluster. This takes effect only when ManagedClusterAutoApproval feature gate is enabled.", + "featureGates": "FeatureGates represents the list of feature gates for registration If it is set empty, default feature gates will be used. If it is set, featuregate/Foo is an example of one item in FeatureGates:\n 1. If featuregate/Foo does not exist, registration-operator will discard it\n 2. If featuregate/Foo exists and is false by default. It is now possible to set featuregate/Foo=[false|true]\n 3. If featuregate/Foo exists and is true by default. If a cluster-admin upgrading from 1 to 2 wants to continue having featuregate/Foo=false,\n \the can set featuregate/Foo=false before upgrading. Let's say the cluster-admin wants featuregate/Foo=false.", +} + +func (RegistrationHubConfiguration) SwaggerDoc() map[string]string { + return map_RegistrationHubConfiguration +} + +var map_RelatedResourceMeta = map[string]string{ + "": "RelatedResourceMeta represents the resource that is managed by an operator", + "group": "group is the group of the resource that you're tracking", + "version": "version is the version of the thing you're tracking", + "resource": "resource is the resource type of the resource that you're tracking", + "namespace": "namespace is where the thing you're tracking is", + "name": "name is the name of the resource that you're tracking", +} + +func (RelatedResourceMeta) SwaggerDoc() map[string]string { + return map_RelatedResourceMeta +} + +var map_WebhookConfiguration = map[string]string{ + "": "WebhookConfiguration has two properties: Address and Port.", + "address": "Address represents the address of a webhook-server. It could be in IP format or fqdn format. The Address must be reachable by apiserver of the hub cluster.", + "port": "Port represents the port of a webhook-server. The default value of Port is 443.", +} + +func (WebhookConfiguration) SwaggerDoc() map[string]string { + return map_WebhookConfiguration +} + +var map_WorkConfiguration = map[string]string{ + "featureGates": "FeatureGates represents the list of feature gates for work If it is set empty, default feature gates will be used. If it is set, featuregate/Foo is an example of one item in FeatureGates:\n 1. 
If featuregate/Foo does not exist, registration-operator will discard it\n 2. If featuregate/Foo exists and is false by default. It is now possible to set featuregate/Foo=[false|true]\n 3. If featuregate/Foo exists and is true by default. If a cluster-admin upgrading from 1 to 2 wants to continue having featuregate/Foo=false,\n \the can set featuregate/Foo=false before upgrading. Let's say the cluster-admin wants featuregate/Foo=false.", +} + +func (WorkConfiguration) SwaggerDoc() map[string]string { + return map_WorkConfiguration +} + var map_HubApiServerHostAlias = map[string]string{ "": "HubApiServerHostAlias holds the mapping between IP and hostname that will be injected as an entry in the pod's hosts file.", "ip": "IP address of the host file entry.", @@ -132,8 +182,8 @@ func (Klusterlet) SwaggerDoc() map[string]string { } var map_KlusterletDeployOption = map[string]string{ - "": "KlusterletDeployOption describes the deploy options for klusterlet", - "mode": "Mode can be Default or Hosted. It is Default mode if not specified In Default mode, all klusterlet related resources are deployed on the managed cluster. In Hosted mode, only crd and configurations are installed on the spoke/managed cluster. Controllers run in another cluster (defined as management-cluster) and connect to the mangaged cluster with the kubeconfig in secret of \"external-managed-kubeconfig\"(a kubeconfig of managed-cluster with cluster-admin permission). Note: Do not modify the Mode field once it's applied.", + "": "KlusterletDeployOption describes the deployment options for klusterlet", + "mode": "Mode can be Default, Hosted, Singleton or SingletonHosted. It is Default mode if not specified In Default mode, all klusterlet related resources are deployed on the managed cluster. In Hosted mode, only crd and configurations are installed on the spoke/managed cluster. Controllers run in another cluster (defined as management-cluster) and connect to the mangaged cluster with the kubeconfig in secret of \"external-managed-kubeconfig\"(a kubeconfig of managed-cluster with cluster-admin permission). In Singleton mode, registration/work agent is started as a single deployment. In SingletonHosted mode, agent is started as a single deployment in hosted mode. Note: Do not modify the Mode field once it's applied.", } func (KlusterletDeployOption) SwaggerDoc() map[string]string { @@ -155,8 +205,9 @@ var map_KlusterletSpec = map[string]string{ "namespace": "Namespace is the namespace to deploy the agent on the managed cluster. The namespace must have a prefix of \"open-cluster-management-\", and if it is not set, the namespace of \"open-cluster-management-agent\" is used to deploy agent. In addition, the add-ons are deployed to the namespace of \"{Namespace}-addon\". In the Hosted mode, this namespace still exists on the managed cluster to contain necessary resources, like service accounts, roles and rolebindings, while the agent is deployed to the namespace with the same name as klusterlet on the management cluster.", "registrationImagePullSpec": "RegistrationImagePullSpec represents the desired image configuration of registration agent. quay.io/open-cluster-management.io/registration:latest will be used if unspecified.", "workImagePullSpec": "WorkImagePullSpec represents the desired image configuration of work agent. quay.io/open-cluster-management.io/work:latest will be used if unspecified.", + "imagePullSpec": "ImagePullSpec represents the desired image configuration of agent, it takes effect only when singleton mode is set. 
quay.io/open-cluster-management.io/registration-operator:latest will be used if unspecified", "clusterName": "ClusterName is the name of the managed cluster to be created on hub. The Klusterlet agent generates a random name if it is not set, or discovers the appropriate cluster name on OpenShift.", - "externalServerURLs": "ExternalServerURLs represents the a list of apiserver urls and ca bundles that is accessible externally If it is set empty, managed cluster has no externally accessible url that hub cluster can visit.", + "externalServerURLs": "ExternalServerURLs represents a list of apiserver urls and ca bundles that is accessible externally If it is set empty, managed cluster has no externally accessible url that hub cluster can visit.", "nodePlacement": "NodePlacement enables explicit control over the scheduling of the deployed pods.", "deployOption": "DeployOption contains the options of deploying a klusterlet", "registrationConfiguration": "RegistrationConfiguration contains the configuration of registration", @@ -180,47 +231,16 @@ func (KlusterletStatus) SwaggerDoc() map[string]string { return map_KlusterletStatus } -var map_NodePlacement = map[string]string{ - "": "NodePlacement describes node scheduling configuration for the pods.", - "nodeSelector": "NodeSelector defines which Nodes the Pods are scheduled on. The default is an empty list.", - "tolerations": "Tolerations is attached by pods to tolerate any taint that matches the triple using the matching operator . The default is an empty list.", -} - -func (NodePlacement) SwaggerDoc() map[string]string { - return map_NodePlacement -} - var map_RegistrationConfiguration = map[string]string{ "clientCertExpirationSeconds": "clientCertExpirationSeconds represents the seconds of a client certificate to expire. If it is not set or 0, the default duration seconds will be set by the hub cluster. If the value is larger than the max signing duration seconds set on the hub cluster, the max signing duration seconds will be set.", "featureGates": "FeatureGates represents the list of feature gates for registration If it is set empty, default feature gates will be used. If it is set, featuregate/Foo is an example of one item in FeatureGates:\n 1. If featuregate/Foo does not exist, registration-operator will discard it\n 2. If featuregate/Foo exists and is false by default. It is now possible to set featuregate/Foo=[false|true]\n 3. If featuregate/Foo exists and is true by default. If a cluster-admin upgrading from 1 to 2 wants to continue having featuregate/Foo=false,\n \the can set featuregate/Foo=false before upgrading. Let's say the cluster-admin wants featuregate/Foo=false.", + "clusterAnnotations": "ClusterAnnotations is annotations with the reserve prefix \"agent.open-cluster-management.io\" set on ManagedCluster when creating only, other actors can update it afterwards.", } func (RegistrationConfiguration) SwaggerDoc() map[string]string { return map_RegistrationConfiguration } -var map_RegistrationHubConfiguration = map[string]string{ - "autoApproveUsers": "AutoApproveUser represents a list of users that can auto approve CSR and accept client. If the credential of the bootstrap-hub-kubeconfig matches to the users, the cluster created by the bootstrap-hub-kubeconfig will be auto-registered into the hub cluster. This takes effect only when ManagedClusterAutoApproval feature gate is enabled.", - "featureGates": "FeatureGates represents the list of feature gates for registration If it is set empty, default feature gates will be used. 
If it is set, featuregate/Foo is an example of one item in FeatureGates:\n 1. If featuregate/Foo does not exist, registration-operator will discard it\n 2. If featuregate/Foo exists and is false by default. It is now possible to set featuregate/Foo=[false|true]\n 3. If featuregate/Foo exists and is true by default. If a cluster-admin upgrading from 1 to 2 wants to continue having featuregate/Foo=false,\n \the can set featuregate/Foo=false before upgrading. Let's say the cluster-admin wants featuregate/Foo=false.", -} - -func (RegistrationHubConfiguration) SwaggerDoc() map[string]string { - return map_RegistrationHubConfiguration -} - -var map_RelatedResourceMeta = map[string]string{ - "": "RelatedResourceMeta represents the resource that is managed by an operator", - "group": "group is the group of the resource that you're tracking", - "version": "version is the version of the thing you're tracking", - "resource": "resource is the resource type of the resource that you're tracking", - "namespace": "namespace is where the thing you're tracking is", - "name": "name is the name of the resource that you're tracking", -} - -func (RelatedResourceMeta) SwaggerDoc() map[string]string { - return map_RelatedResourceMeta -} - var map_ServerURL = map[string]string{ "": "ServerURL represents the apiserver url and ca bundle that is accessible externally", "url": "URL is the url of apiserver endpoint of the managed cluster.", @@ -231,22 +251,4 @@ func (ServerURL) SwaggerDoc() map[string]string { return map_ServerURL } -var map_WebhookConfiguration = map[string]string{ - "": "WebhookConfiguration has two properties: Address and Port.", - "address": "Address represents the address of a webhook-server. It could be in IP format or fqdn format. The Address must be reachable by apiserver of the hub cluster.", - "port": "Port represents the port of a webhook-server. The default value of Port is 443.", -} - -func (WebhookConfiguration) SwaggerDoc() map[string]string { - return map_WebhookConfiguration -} - -var map_WorkConfiguration = map[string]string{ - "featureGates": "FeatureGates represents the list of feature gates for work If it is set empty, default feature gates will be used. If it is set, featuregate/Foo is an example of one item in FeatureGates:\n 1. If featuregate/Foo does not exist, registration-operator will discard it\n 2. If featuregate/Foo exists and is false by default. It is now possible to set featuregate/Foo=[false|true]\n 3. If featuregate/Foo exists and is true by default. If a cluster-admin upgrading from 1 to 2 wants to continue having featuregate/Foo=false,\n \the can set featuregate/Foo=false before upgrading. Let's say the cluster-admin wants featuregate/Foo=false.", -} - -func (WorkConfiguration) SwaggerDoc() map[string]string { - return map_WorkConfiguration -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/open-cluster-management.io/api/work/v1/types.go b/vendor/open-cluster-management.io/api/work/v1/types.go index c07d3c464..39b8ec2b2 100644 --- a/vendor/open-cluster-management.io/api/work/v1/types.go +++ b/vendor/open-cluster-management.io/api/work/v1/types.go @@ -475,22 +475,28 @@ const ( JsonRaw ValueType = "JsonRaw" ) -// ManifestConditionType represents the condition type of a single -// resource manifest deployed on the managed cluster. 
-type ManifestConditionType string - const ( // ManifestProgressing represents that the resource is being applied on the managed cluster - ManifestProgressing ManifestConditionType = "Progressing" + ManifestProgressing string = "Progressing" // ManifestApplied represents that the resource object is applied // on the managed cluster. - ManifestApplied ManifestConditionType = "Applied" + ManifestApplied string = "Applied" // ManifestAvailable represents that the resource object exists // on the managed cluster. - ManifestAvailable ManifestConditionType = "Available" + ManifestAvailable string = "Available" // ManifestDegraded represents that the current state of resource object does not // match the desired state for a certain period. - ManifestDegraded ManifestConditionType = "Degraded" + ManifestDegraded string = "Degraded" +) + +const ( + // ManifestWorkFinalizer is the name of the finalizer added to manifestworks. It is used to ensure + // related appliedmanifestwork of a manifestwork are deleted before the manifestwork itself is deleted + ManifestWorkFinalizer = "cluster.open-cluster-management.io/manifest-work-cleanup" + // AppliedManifestWorkFinalizer is the name of the finalizer added to appliedmanifestwork. It is to + // ensure all resource relates to appliedmanifestwork is deleted before appliedmanifestwork itself + // is deleted. + AppliedManifestWorkFinalizer = "cluster.open-cluster-management.io/applied-manifest-work-cleanup" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml b/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml index 9315f0770..ecd82e555 100644 --- a/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml +++ b/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml @@ -325,6 +325,139 @@ spec: description: Name of the Placement resource in the current namespace minLength: 1 type: string + rolloutStrategy: + default: + all: + timeout: None + type: All + description: Rollout strategy to apply workload to the selected + clusters by Placement and DecisionStrategy. + properties: + all: + description: All define required fields for RolloutStrategy + type All + properties: + timeout: + default: None + description: Timeout define how long workload applier + controller will wait till workload reach successful + state in the cluster. Timeout default value is None + meaning the workload applier will not proceed apply + workload to other clusters if did not reach the successful + state. Timeout must be defined in [0-9h]|[0-9m]|[0-9s] + format examples; 2h , 90m , 360s + pattern: ^(([0-9])+[h|m|s])|None$ + type: string + type: object + progressive: + description: Progressive define required fields for RolloutStrategy + type Progressive + properties: + mandatoryDecisionGroups: + description: List of the decision groups names or indexes + to apply the workload first and fail if workload did + not reach successful state. GroupName or GroupIndex + must match with the decisionGroups defined in the + placement's decisionStrategy + items: + description: MandatoryDecisionGroup set the decision + group name or group index. GroupName is considered + first to select the decisionGroups then GroupIndex. 
+ properties: + groupIndex: + description: GroupIndex of the decision group + should match the placementDecisions label value + with label key cluster.open-cluster-management.io/decision-group-index + format: int32 + type: integer + groupName: + description: GroupName of the decision group should + match the placementDecisions label value with + label key cluster.open-cluster-management.io/decision-group-name + type: string + type: object + type: array + maxConcurrency: + anyOf: + - type: integer + - type: string + description: MaxConcurrency is the max number of clusters + to deploy workload concurrently. The default value + for MaxConcurrency is determined from the clustersPerDecisionGroup + defined in the placement->DecisionStrategy. + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + x-kubernetes-int-or-string: true + timeout: + default: None + description: Timeout define how long workload applier + controller will wait till workload reach successful + state in the cluster. Timeout default value is None + meaning the workload applier will not proceed apply + workload to other clusters if did not reach the successful + state. Timeout must be defined in [0-9h]|[0-9m]|[0-9s] + format examples; 2h , 90m , 360s + pattern: ^(([0-9])+[h|m|s])|None$ + type: string + type: object + progressivePerGroup: + description: ProgressivePerGroup define required fields + for RolloutStrategy type ProgressivePerGroup + properties: + mandatoryDecisionGroups: + description: List of the decision groups names or indexes + to apply the workload first and fail if workload did + not reach successful state. GroupName or GroupIndex + must match with the decisionGroups defined in the + placement's decisionStrategy + items: + description: MandatoryDecisionGroup set the decision + group name or group index. GroupName is considered + first to select the decisionGroups then GroupIndex. + properties: + groupIndex: + description: GroupIndex of the decision group + should match the placementDecisions label value + with label key cluster.open-cluster-management.io/decision-group-index + format: int32 + type: integer + groupName: + description: GroupName of the decision group should + match the placementDecisions label value with + label key cluster.open-cluster-management.io/decision-group-name + type: string + type: object + type: array + timeout: + default: None + description: Timeout define how long workload applier + controller will wait till workload reach successful + state in the cluster. Timeout default value is None + meaning the workload applier will not proceed apply + workload to other clusters if did not reach the successful + state. Timeout must be defined in [0-9h]|[0-9m]|[0-9s] + format examples; 2h , 90m , 360s + pattern: ^(([0-9])+[h|m|s])|None$ + type: string + type: object + type: + default: All + description: Rollout strategy Types are All, Progressive + and ProgressivePerGroup 1) All means apply the workload + to all clusters in the decision groups at once. 2) Progressive + means apply the workload to the selected clusters progressively + per cluster. The workload will not be applied to the next + cluster unless one of the current applied clusters reach + the successful state or timeout. 3) ProgressivePerGroup + means apply the workload to decisionGroup clusters progressively + per group. The workload will not be applied to the next + decisionGroup unless all clusters in the current group + reach the successful state or timeout. 
+ enum: + - All + - Progressive + - ProgressivePerGroup + type: string + type: object required: - name type: object @@ -409,8 +542,49 @@ spec: - type type: object type: array + placementSummary: + description: PlacementRef Summary + items: + description: PlacementSummary provides info regards number of clusters + and clusterGroups selected by the placement refs. + properties: + availableDecisionGroups: + description: availableDecisionGroups shows number of decisionGroups + that have all clusters manifestWorks in available state regards + total number of decisionGroups. ex; 2/4 (2 out of 4) + type: string + name: + description: PlacementRef Name + type: string + summary: + description: Summary totals of resulting ManifestWorks for the + placement + properties: + Applied: + description: 'Applied is the number of ManifestWorks with + condition Applied: true' + type: integer + available: + description: 'Available is the number of ManifestWorks with + condition Available: true' + type: integer + degraded: + description: 'TODO: Degraded is the number of ManifestWorks + with condition Degraded: true' + type: integer + progressing: + description: 'TODO: Progressing is the number of ManifestWorks + with condition Progressing: true' + type: integer + total: + description: Total number of ManifestWorks managed by the + ManifestWorkReplicaSet + type: integer + type: object + type: object + type: array summary: - description: Summary totals of resulting ManifestWorks + description: Summary totals of resulting ManifestWorks for all placements properties: Applied: description: 'Applied is the number of ManifestWorks with condition diff --git a/vendor/open-cluster-management.io/api/work/v1alpha1/types_manifestworkreplicaset.go b/vendor/open-cluster-management.io/api/work/v1alpha1/types_manifestworkreplicaset.go index 09b8c5e75..b68e00897 100644 --- a/vendor/open-cluster-management.io/api/work/v1alpha1/types_manifestworkreplicaset.go +++ b/vendor/open-cluster-management.io/api/work/v1alpha1/types_manifestworkreplicaset.go @@ -17,6 +17,7 @@ limitations under the License. package v1alpha1 import ( + cluster "open-cluster-management.io/api/cluster/v1alpha1" work "open-cluster-management.io/api/work/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -75,8 +76,11 @@ type ManifestWorkReplicaSetStatus struct { // 2. PlacementRefValid Conditions []metav1.Condition `json:"conditions,omitempty"` - // Summary totals of resulting ManifestWorks + // Summary totals of resulting ManifestWorks for all placements Summary ManifestWorkReplicaSetSummary `json:"summary"` + + // PlacementRef Summary + PlacementsSummary []PlacementSummary `json:"placementSummary"` } // localPlacementReference is the name of a Placement resource in current namespace @@ -86,6 +90,23 @@ type LocalPlacementReference struct { // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 Name string `json:"name"` + + // +optional + // +kubebuilder:default={type: All, all: {timeout: None}} + RolloutStrategy cluster.RolloutStrategy `json:"rolloutStrategy"` +} + +// PlacementSummary provides info regards number of clusters and clusterGroups selected by the placement refs. +type PlacementSummary struct { + // PlacementRef Name + Name string `json:"name"` + + // availableDecisionGroups shows number of decisionGroups that have all clusters manifestWorks in available state regards total number of decisionGroups. 
+ // ex; 2/4 (2 out of 4) + AvailableDecisionGroups string `json:"availableDecisionGroups"` + + // Summary totals of resulting ManifestWorks for the placement + Summary ManifestWorkReplicaSetSummary `json:"summary"` } // ManifestWorkReplicaSetSummary provides reference counts of all ManifestWorks that are associated with a @@ -129,12 +150,23 @@ const ( // ReasonNotAsExpected is a reason for ManifestWorkReplicaSetConditionManifestworkApplied condition type representing // the ManifestWorkSet is not applied correctly. ReasonNotAsExpected = "NotAsExpected" + // ReasonProgressing is a reason for ManifestWorkReplicaSetConditionPlacementRolledOut condition type representing. + // The ManifestWorks are progressively applied to the placement clusters. + ReasonProgressing = "Progressing" + // ReasonComplete is a reason for ManifestWorkReplicaSetConditionPlacementRolledOut condition type representing. + // The ManifestWorks are completely applied to the placement clusters. + ReasonComplete = "Complete" // ManifestWorkSetConditionPlacementVerified indicates if Placement is valid // // Reason: AsExpected, PlacementDecisionNotFound, PlacementDecisionEmpty or NotAsExpected ManifestWorkReplicaSetConditionPlacementVerified string = "PlacementVerified" + // ManifestWorkReplicaSetConditionPlacementRolledOut indicates if RollOut Strategy is complete. + // + // Reason: Progressing or Complete. + ManifestWorkReplicaSetConditionPlacementRolledOut string = "PlacementRolledOut" + // ManifestWorkSetConditionManifestworkApplied confirms that a ManifestWork has been created in each cluster defined by PlacementDecision // // Reason: AsExpected, NotAsExpected or Processing diff --git a/vendor/open-cluster-management.io/api/work/v1alpha1/zz_generated.deepcopy.go b/vendor/open-cluster-management.io/api/work/v1alpha1/zz_generated.deepcopy.go index a81786dd5..05af3f770 100644 --- a/vendor/open-cluster-management.io/api/work/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/open-cluster-management.io/api/work/v1alpha1/zz_generated.deepcopy.go @@ -13,6 +13,7 @@ import ( // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LocalPlacementReference) DeepCopyInto(out *LocalPlacementReference) { *out = *in + in.RolloutStrategy.DeepCopyInto(&out.RolloutStrategy) return } @@ -94,7 +95,9 @@ func (in *ManifestWorkReplicaSetSpec) DeepCopyInto(out *ManifestWorkReplicaSetSp if in.PlacementRefs != nil { in, out := &in.PlacementRefs, &out.PlacementRefs *out = make([]LocalPlacementReference, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } @@ -120,6 +123,11 @@ func (in *ManifestWorkReplicaSetStatus) DeepCopyInto(out *ManifestWorkReplicaSet } } out.Summary = in.Summary + if in.PlacementsSummary != nil { + in, out := &in.PlacementsSummary, &out.PlacementsSummary + *out = make([]PlacementSummary, len(*in)) + copy(*out, *in) + } return } @@ -148,3 +156,20 @@ func (in *ManifestWorkReplicaSetSummary) DeepCopy() *ManifestWorkReplicaSetSumma in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementSummary) DeepCopyInto(out *PlacementSummary) { + *out = *in + out.Summary = in.Summary + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementSummary. 
+func (in *PlacementSummary) DeepCopy() *PlacementSummary { + if in == nil { + return nil + } + out := new(PlacementSummary) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/open-cluster-management.io/api/work/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/open-cluster-management.io/api/work/v1alpha1/zz_generated.swagger_doc_generated.go index 1eb35b56c..b158386e3 100644 --- a/vendor/open-cluster-management.io/api/work/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/open-cluster-management.io/api/work/v1alpha1/zz_generated.swagger_doc_generated.go @@ -49,9 +49,10 @@ func (ManifestWorkReplicaSetSpec) SwaggerDoc() map[string]string { } var map_ManifestWorkReplicaSetStatus = map[string]string{ - "": "ManifestWorkReplicaSetStatus defines the observed state of ManifestWorkReplicaSet", - "conditions": "Conditions contains the different condition statuses for distrbution of ManifestWork resources Valid condition types are: 1. AppliedManifestWorks represents ManifestWorks have been distributed as per placement All, Partial, None, Problem 2. PlacementRefValid", - "summary": "Summary totals of resulting ManifestWorks", + "": "ManifestWorkReplicaSetStatus defines the observed state of ManifestWorkReplicaSet", + "conditions": "Conditions contains the different condition statuses for distrbution of ManifestWork resources Valid condition types are: 1. AppliedManifestWorks represents ManifestWorks have been distributed as per placement All, Partial, None, Problem 2. PlacementRefValid", + "summary": "Summary totals of resulting ManifestWorks for all placements", + "placementSummary": "PlacementRef Summary", } func (ManifestWorkReplicaSetStatus) SwaggerDoc() map[string]string { @@ -69,4 +70,15 @@ func (ManifestWorkReplicaSetSummary) SwaggerDoc() map[string]string { return map_ManifestWorkReplicaSetSummary } +var map_PlacementSummary = map[string]string{ + "": "PlacementSummary provides info regards number of clusters and clusterGroups selected by the placement refs.", + "name": "PlacementRef Name", + "availableDecisionGroups": "availableDecisionGroups shows number of decisionGroups that have all clusters manifestWorks in available state regards total number of decisionGroups. ex; 2/4 (2 out of 4)", + "summary": "Summary totals of resulting ManifestWorks for the placement", +} + +func (PlacementSummary) SwaggerDoc() map[string]string { + return map_PlacementSummary +} + // AUTO-GENERATED FUNCTIONS END HERE
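As a usage sketch (not part of the generated code above), a Klusterlet using the new Singleton install mode might look like the following. The field names (deployOption.mode, imagePullSpec, namespace, clusterName) come from the KlusterletSpec and KlusterletDeployOption types in this patch; the API group/version, object name, namespace value and cluster name are illustrative assumptions, and the image shown is only the documented default.

apiVersion: operator.open-cluster-management.io/v1
kind: Klusterlet
metadata:
  name: klusterlet                      # illustrative name, not mandated by this patch
spec:
  deployOption:
    mode: Singleton                     # registration/work agent run as a single deployment
  # imagePullSpec takes effect only in Singleton/SingletonHosted mode;
  # quay.io/open-cluster-management.io/registration-operator:latest is the documented default
  imagePullSpec: quay.io/open-cluster-management.io/registration-operator:latest
  namespace: open-cluster-management-agent
  clusterName: cluster1                 # optional; a random name is generated if omitted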
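Likewise, a sketch of the new rolloutStrategy field on a ManifestWorkReplicaSet placement reference, using the Progressive type described by the CRD above. Only the placementRefs stanza is shown (the workload template is omitted); the object name, namespace, placement name and decision group name are assumptions for illustration, not values from this patch.

apiVersion: work.open-cluster-management.io/v1alpha1
kind: ManifestWorkReplicaSet
metadata:
  name: example-set                     # illustrative
  namespace: default                    # illustrative
spec:
  placementRefs:
  - name: example-placement             # Placement in the same namespace
    rolloutStrategy:
      type: Progressive                 # All | Progressive | ProgressivePerGroup
      progressive:
        maxConcurrency: 25%             # max clusters rolled out concurrently; otherwise derived from clustersPerDecisionGroup
        timeout: 90m                    # per-cluster timeout in h/m/s format, or None
        mandatoryDecisionGroups:
        - groupName: canary             # must match a decision group in the Placement's decisionStrategy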