diff --git a/crds/clustervirtualimage.yaml b/crds/clustervirtualimage.yaml index 44c44efd3..3b4335a6e 100644 --- a/crds/clustervirtualimage.yaml +++ b/crds/clustervirtualimage.yaml @@ -188,31 +188,36 @@ spec: type: object properties: conditions: - type: array description: | - Hold the state information of the `ClusterVirtualImage`. + The latest available observations of an object's current state. + type: array items: type: object properties: - type: - type: string - status: + lastProbeTime: + description: Last time the condition was checked. + format: date-time type: string - reason: + lastTransitionTime: + description: Last time the condition transit from one status to another. + format: date-time type: string message: + description: Human readable message indicating details about last transition. type: string - lastProbeTime: + reason: + description: (brief) reason for the condition's last transition. type: string - format: date-time - nullable: true - lastTransitionTime: + status: + description: Status of the condition, one of True, False, Unknown. + type: string + enum: ["True", "False", "Unknown"] + type: + description: Type of condition. type: string - format: date-time - nullable: true required: - - type - status + - type downloadSpeed: type: object description: | @@ -287,9 +292,7 @@ spec: * Provisioning - The process of resource creation (copying/downloading/building the image) is in progress. * WaitForUserUpload - Waiting for the user to upload the image. The endpoint to upload the image is specified in `.status.uploadCommand`. * Ready - The resource is created and ready to use. - * Failed - There was a problem when creating a resource, details can be seen in `.status.failureReason` and `.status.failureMessage`. - * NotReady - It is not possible to get information about the child image because of inability to connect to DVCR. The resource cannot be used. - * ImageLost - The child image of the resource is missing. The resource cannot be used. 
+ * Failed - There was a problem when creating a resource. * Terminating - The process of resource deletion is in progress. enum: [ @@ -298,9 +301,7 @@ spec: "WaitForUserUpload", "Ready", "Failed", - "ImageLost", "Terminating", - "Unknown", ] progress: type: string @@ -313,7 +314,7 @@ spec: observedGeneration: type: integer description: | - Represents the .metadata.generation that the status was set based upon. + The generation last processed by the controller. additionalPrinterColumns: - name: Phase type: string diff --git a/crds/doc-ru-clustervirtualimage.yaml b/crds/doc-ru-clustervirtualimage.yaml index 7feef3d4d..81f18f3e9 100644 --- a/crds/doc-ru-clustervirtualimage.yaml +++ b/crds/doc-ru-clustervirtualimage.yaml @@ -97,7 +97,22 @@ spec: properties: conditions: description: | - Содержит информацию о состоянии `ClusterVirtualImage`. + Последнее подтвержденное состояние данного ресурса. + items: + properties: + lastProbeTime: + description: Время проверки условия. + lastTransitionTime: + description: Время перехода условия из одного состояния в другое. + message: + description: Удобочитаемое сообщение с подробной информацией о последнем переходе. + reason: + description: Краткая причина последнего перехода состояния. + status: + description: | + Статус условия. Возможные значения: `True`, `False`, `Unknown`. + type: + description: Тип условия. cdrom: description: | Является ли образ форматом, который должен быть смонтирован как cdrom, например iso и т. д. @@ -128,9 +143,7 @@ spec: * Provisioning - Идет процесс создания ресурса (копирование/загрузка/создание образа). * WaitForUserUpload - Ожидание загрузки образа пользователем. Путь для загрузки образа указывается в `.status.uploadCommand`. * Ready - Ресурс создан и готов к использованию. - * Failed - При создании ресурса возникла проблема, подробности можно увидеть в `.status.failureReason` и `.status.failureMessage`. 
- * NotReady - Невозможно получить информацию о дочернем образе из-за невозможности подключения к DVCR. Ресурс не может быть использован. - * ImageLost - Дочернее образ ресурса отсутствует. Ресурс не может быть использован. + * Failed - При создании ресурса возникла проблема. * Terminating - Ресурс находится в процессе удаления. progress: description: | @@ -160,4 +173,4 @@ spec: Команда для загрузки образа для типа 'Upload'. observedGeneration: description: | - Представляет .metadata.generation, на основе которого был установлен статус. + Поколение ресурса, которое в последний раз обрабатывалось контроллером. diff --git a/crds/doc-ru-virtualdisk.yaml b/crds/doc-ru-virtualdisk.yaml index daa4dfe3b..4ffefbd21 100644 --- a/crds/doc-ru-virtualdisk.yaml +++ b/crds/doc-ru-virtualdisk.yaml @@ -94,7 +94,22 @@ spec: properties: conditions: description: | - Содержит информацию о состоянии `VirtualDisk`. + Последнее подтвержденное состояние данного ресурса. + items: + properties: + lastProbeTime: + description: Время проверки условия. + lastTransitionTime: + description: Время перехода условия из одного состояния в другое. + message: + description: Удобочитаемое сообщение с подробной информацией о последнем переходе. + reason: + description: Краткая причина последнего перехода состояния. + status: + description: | + Статус условия. Возможные значения: `True`, `False`, `Unknown`. + type: + description: Тип условия. attachedToVirtualMachines: description: | Список виртуальных машин, использующих этот диск @@ -125,7 +140,7 @@ spec: * Provisioning — идет процесс создания ресурса (копирование/загрузка/создание образа). * WaitForUserUpload — ожидание загрузки образа пользователем. Путь для загрузки образа указывается в `.status.uploadCommand`. * Ready — ресурс создан и готов к использованию. - * Failed — при создании ресурса возникла проблема, подробности можно увидеть в `.status.failureReason` и `.status.failureMessage`. + * Failed — при создании ресурса возникла проблема. 
* PVCLost — дочерний PVC ресурса отсутствует. Ресурс не может быть использован. * Terminating - Ресурс находится в процессе удаления. progress: @@ -141,4 +156,4 @@ spec: Команда для загрузки образа для типа 'Upload'. observedGeneration: description: | - Представляет .metadata.generation, на основе которого был установлен статус. + Поколение ресурса, которое в последний раз обрабатывалось контроллером. diff --git a/crds/virtualdisk.yaml b/crds/virtualdisk.yaml index 89da6d961..0a1d39c6d 100644 --- a/crds/virtualdisk.yaml +++ b/crds/virtualdisk.yaml @@ -193,31 +193,36 @@ spec: type: object properties: conditions: - type: array description: | - Hold the state information of the `ClusterVirtualImage`. + The latest available observations of an object's current state. + type: array items: type: object properties: - type: - type: string - status: + lastProbeTime: + description: Last time the condition was checked. + format: date-time type: string - reason: + lastTransitionTime: + description: Last time the condition transit from one status to another. + format: date-time type: string message: + description: Human readable message indicating details about last transition. type: string - lastProbeTime: + reason: + description: (brief) reason for the condition's last transition. type: string - format: date-time - nullable: true - lastTransitionTime: + status: + description: Status of the condition, one of True, False, Unknown. + type: string + enum: ["True", "False", "Unknown"] + type: + description: Type of condition. type: string - format: date-time - nullable: true required: - - type - status + - type downloadSpeed: type: object description: | @@ -271,7 +276,6 @@ spec: "Ready", "Failed", "PVCLost", - "Unknown", "Terminating", ] progress: @@ -295,7 +299,7 @@ spec: observedGeneration: type: integer description: | - Represents the .metadata.generation that the status was set based upon. + The generation last processed by the controller. 
additionalPrinterColumns: - name: Phase type: string diff --git a/images/virtualization-artifact/cmd/virtualization-controller/main.go b/images/virtualization-artifact/cmd/virtualization-controller/main.go index f733f8496..15994f218 100644 --- a/images/virtualization-artifact/cmd/virtualization-controller/main.go +++ b/images/virtualization-artifact/cmd/virtualization-controller/main.go @@ -43,6 +43,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/cpu" "github.com/deckhouse/virtualization-controller/pkg/controller/cvi" "github.com/deckhouse/virtualization-controller/pkg/controller/ipam" + "github.com/deckhouse/virtualization-controller/pkg/controller/vd" "github.com/deckhouse/virtualization-controller/pkg/controller/vmop" virtv2alpha1 "github.com/deckhouse/virtualization/api/core/v1alpha2" ) @@ -179,6 +180,11 @@ func main() { os.Exit(1) } + if _, err = vd.NewController(ctx, mgr, log, importerImage, uploaderImage, dvcrSettings); err != nil { + log.Error(err, "") + os.Exit(1) + } + if _, err := controller.NewVMIController(ctx, mgr, log, importerImage, uploaderImage, dvcrSettings); err != nil { log.Error(err, "") os.Exit(1) diff --git a/images/virtualization-artifact/pkg/common/percent_test.go b/images/virtualization-artifact/pkg/common/percent_test.go index 518eab51e..059b21787 100644 --- a/images/virtualization-artifact/pkg/common/percent_test.go +++ b/images/virtualization-artifact/pkg/common/percent_test.go @@ -28,6 +28,18 @@ func Test_ScalePercent(t *testing.T) { high float64 expect string }{ + { + "70%", + 0.0, + 100.0, + "70.0%", + }, + { + "0%", + 50.0, + 100.0, + "50.0%", + }, { "100.0%", 0.0, diff --git a/images/virtualization-artifact/pkg/common/vmd/util.go b/images/virtualization-artifact/pkg/common/vmd/util.go deleted file mode 100644 index 788462b23..000000000 --- a/images/virtualization-artifact/pkg/common/vmd/util.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2024 Flant JSC - -Licensed under the Apache License, Version 
2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vmd - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - - virtvcore "github.com/deckhouse/virtualization/api/core" - virtv2alpha1 "github.com/deckhouse/virtualization/api/core/v1alpha2" -) - -// MakeOwnerReference makes owner reference from a ClusterVirtualImage. -func MakeOwnerReference(vmd *virtv2alpha1.VirtualDisk) metav1.OwnerReference { - return *metav1.NewControllerRef(vmd, schema.GroupVersionKind{ - Group: virtvcore.GroupName, - Version: virtv2alpha1.Version, - Kind: virtv2alpha1.VirtualDiskKind, - }) -} - -func GetDataSourceType(vmd *virtv2alpha1.VirtualDisk) string { - if vmd == nil || vmd.Spec.DataSource == nil { - return "" - } - return string(vmd.Spec.DataSource.Type) -} - -func IsDVCRSource(vmd *virtv2alpha1.VirtualDisk) bool { - if vmd == nil || vmd.Spec.DataSource == nil { - return false - } - - return vmd.Spec.DataSource.Type == virtv2alpha1.DataSourceTypeObjectRef -} - -func IsTwoPhaseImport(vmd *virtv2alpha1.VirtualDisk) bool { - if vmd == nil || vmd.Spec.DataSource == nil { - return false - } - switch vmd.Spec.DataSource.Type { - case virtv2alpha1.DataSourceTypeHTTP, - virtv2alpha1.DataSourceTypeUpload, - virtv2alpha1.DataSourceTypeContainerImage: - return true - } - return false -} - -// IsBlankPVC returns true if VMD has no DataSource: only PVC should be created. 
-func IsBlankPVC(vmd *virtv2alpha1.VirtualDisk) bool { - if vmd == nil { - return false - } - return vmd.Spec.DataSource == nil -} diff --git a/images/virtualization-artifact/pkg/controller/common/util.go b/images/virtualization-artifact/pkg/controller/common/util.go index 9b01399a7..5098ec1ef 100644 --- a/images/virtualization-artifact/pkg/controller/common/util.go +++ b/images/virtualization-artifact/pkg/controller/common/util.go @@ -43,6 +43,7 @@ import ( const ( CVIShortName = "cvi" + VDShortName = "vd" // AnnAPIGroup is the APIGroup for virtualization-controller. AnnAPIGroup = "virt.deckhouse.io" diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/datasource_ready.go b/images/virtualization-artifact/pkg/controller/cvi/internal/datasource_ready.go index cf8172883..50882f8ae 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/datasource_ready.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/datasource_ready.go @@ -20,7 +20,6 @@ import ( "context" "errors" "fmt" - "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -72,15 +71,15 @@ func (h DatasourceReadyHandler) Handle(ctx context.Context, cvi *virtv2.ClusterV case errors.Is(err, source.ErrSecretNotFound): condition.Status = metav1.ConditionFalse condition.Reason = cvicondition.DatasourceReadyReason_ContainerRegistrySecretNotFound - condition.Message = strings.ToTitle(err.Error()) + condition.Message = service.CapitalizeFirstLetter(err.Error()) case errors.As(err, &source.ImageNotReadyError{}): condition.Status = metav1.ConditionFalse condition.Reason = cvicondition.DatasourceReadyReason_ImageNotReady - condition.Message = strings.ToTitle(err.Error()) + condition.Message = service.CapitalizeFirstLetter(err.Error()) case errors.As(err, &source.ClusterImageNotReadyError{}): condition.Status = metav1.ConditionFalse condition.Reason = cvicondition.DatasourceReadyReason_ClusterImageNotReady - 
condition.Message = strings.ToTitle(err.Error()) + condition.Message = service.CapitalizeFirstLetter(err.Error()) } return reconcile.Result{}, err diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go index cf040f271..dc6b6b051 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go @@ -78,6 +78,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualIma cvi.Status.Phase = virtv2.ImageReady + // Unprotect import time supplements to delete them later. err = ds.importerService.Unprotect(ctx, pod) if err != nil { return false, err diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go index fa2e86d74..800fd9498 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go @@ -84,6 +84,7 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu cvi.Status.Phase = virtv2.ImageReady + // Unprotect import time supplements to delete them later. 
err = ds.importerService.Unprotect(ctx, pod) if err != nil { return false, err diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go index 6cda67fd9..396b19e38 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go @@ -86,6 +86,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua cvi.Status.Phase = virtv2.ImageReady + // Unprotect import time supplements to delete them later. err = ds.importerService.Unprotect(ctx, pod) if err != nil { return false, err diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/upload.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/upload.go index faaaa3ef0..d837112fc 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/upload.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/upload.go @@ -93,6 +93,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI cvi.Status.Phase = virtv2.ImageReady + // Unprotect upload time supplements to delete them later. 
err = ds.uploaderService.Unprotect(ctx, pod, svc, ing) if err != nil { return false, err diff --git a/images/virtualization-artifact/pkg/controller/kvbuilder/dv.go b/images/virtualization-artifact/pkg/controller/kvbuilder/dv.go index 8953a3ac7..36c7f98a6 100644 --- a/images/virtualization-artifact/pkg/controller/kvbuilder/dv.go +++ b/images/virtualization-artifact/pkg/controller/kvbuilder/dv.go @@ -60,6 +60,10 @@ func (b *DV) SetPVC(storageClassName *string, size resource.Quantity) { b.Resource.Spec.PVC = pvc.CreateSpecForDataVolume(storageClassName, size) } +func (b *DV) SetDataSource(source *cdiv1.DataVolumeSource) { + b.Resource.Spec.Source = source +} + func (b *DV) SetRegistryDataSource(imageName, authSecret, caBundleConfigMap string) { url := common.DockerRegistrySchemePrefix + imageName diff --git a/images/virtualization-artifact/pkg/controller/service/disk_service.go b/images/virtualization-artifact/pkg/controller/service/disk_service.go new file mode 100644 index 000000000..6e82f1355 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/service/disk_service.go @@ -0,0 +1,277 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package service + +import ( + "context" + "errors" + "fmt" + + corev1 "k8s.io/api/core/v1" + storev1 "k8s.io/api/storage/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/types" + cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/deckhouse/virtualization-controller/pkg/controller/common" + "github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder" + "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + "github.com/deckhouse/virtualization-controller/pkg/dvcr" + "github.com/deckhouse/virtualization-controller/pkg/sdk/framework/helper" + "github.com/deckhouse/virtualization-controller/pkg/util" +) + +type DiskService struct { + client client.Client + dvcrSettings *dvcr.Settings + protection *ProtectionService +} + +func NewDiskService( + client client.Client, + dvcrSettings *dvcr.Settings, + protection *ProtectionService, +) *DiskService { + return &DiskService{ + client: client, + dvcrSettings: dvcrSettings, + protection: protection, + } +} + +func (s DiskService) Start( + ctx context.Context, + pvcSize resource.Quantity, + storageClass *string, + source *cdiv1.DataVolumeSource, + obj ObjectKind, + sup *supplements.Generator, +) error { + dvBuilder := kvbuilder.NewDV(sup.DataVolume()) + dvBuilder.SetDataSource(source) + dvBuilder.SetPVC(storageClass, pvcSize) + dvBuilder.SetOwnerRef(obj, obj.GroupVersionKind()) + + err := s.client.Create(ctx, dvBuilder.GetResource()) + if err != nil && !k8serrors.IsAlreadyExists(err) { + return err + } + + if source.Blank != nil { + return nil + } + + return supplements.EnsureForDataVolume(ctx, s.client, sup, dvBuilder.GetResource(), s.dvcrSettings) +} + +func (s DiskService) CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) { + subResourcesHaveDeleted, err := s.CleanUpSupplements(ctx, sup) + if err != nil { + 
return false, err + } + + pvc, err := s.GetPersistentVolumeClaim(ctx, sup) + if err != nil { + return false, err + } + pv, err := s.GetPersistentVolume(ctx, pvc) + if err != nil { + return false, err + } + + var resourcesHaveDeleted bool + + if pvc != nil { + resourcesHaveDeleted = true + + err = s.protection.RemoveProtection(ctx, pvc) + if err != nil { + return false, err + } + + err = s.client.Delete(ctx, pvc) + if err != nil && !k8serrors.IsNotFound(err) { + return false, err + } + } + + if pv != nil { + resourcesHaveDeleted = true + + err = s.protection.RemoveProtection(ctx, pv) + if err != nil { + return false, err + } + + err = s.client.Delete(ctx, pv) + if err != nil && !k8serrors.IsNotFound(err) { + return false, err + } + } + + return resourcesHaveDeleted || subResourcesHaveDeleted, nil +} + +func (s DiskService) CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) { + dv, err := s.GetDataVolume(ctx, sup) + if err != nil { + return false, err + } + + var hasDeleted bool + + if dv != nil { + hasDeleted = true + + err = s.protection.RemoveProtection(ctx, dv) + if err != nil { + return false, err + } + + err = s.client.Delete(ctx, dv) + if err != nil && !k8serrors.IsNotFound(err) { + return false, err + } + } + + return hasDeleted, supplements.CleanupForDataVolume(ctx, s.client, sup, s.dvcrSettings) +} + +func (s DiskService) Protect(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim, pv *corev1.PersistentVolume) error { + err := s.protection.AddOwnerRef(ctx, owner, pvc) + if err != nil { + return err + } + + return s.protection.AddProtection(ctx, dv, pvc, pv) +} + +func (s DiskService) Unprotect(ctx context.Context, dv *cdiv1.DataVolume) error { + return s.protection.RemoveProtection(ctx, dv) +} + +func (s DiskService) Resize(ctx context.Context, pvc *corev1.PersistentVolumeClaim, newSize resource.Quantity, sup *supplements.Generator) error { + if pvc == nil { + return 
errors.New("got nil pvc") + } + + curSize := pvc.Spec.Resources.Requests[corev1.ResourceStorage] + + if newSize.Cmp(curSize) == -1 { + return ErrTooSmallDiskSize + } + + switch newSize.Cmp(curSize) { + case 0: + return nil + case -1: + return ErrTooSmallDiskSize + default: + pvc.Spec.Resources.Requests[corev1.ResourceStorage] = newSize + + err := s.client.Update(ctx, pvc) + if err != nil { + return fmt.Errorf("failed to increase pvc size: %w", err) + } + + return nil + } +} + +func (s DiskService) GetProgress(dv *cdiv1.DataVolume, prevProgress string, opts ...GetProgressOption) string { + if dv == nil { + return prevProgress + } + + dvProgress := string(dv.Status.Progress) + if dvProgress != "N/A" && dvProgress != "" { + for _, o := range opts { + dvProgress = o.Apply(dvProgress) + } + + return dvProgress + } + + return prevProgress +} + +func (s DiskService) GetCapacity(pvc *corev1.PersistentVolumeClaim) string { + if pvc != nil && pvc.Status.Phase == corev1.ClaimBound { + return util.GetPointer(pvc.Status.Capacity[corev1.ResourceStorage]).String() + } + + return "" +} + +func (s DiskService) GetDataVolume(ctx context.Context, sup *supplements.Generator) (*cdiv1.DataVolume, error) { + return helper.FetchObject(ctx, sup.DataVolume(), s.client, &cdiv1.DataVolume{}) +} + +func (s DiskService) GetPersistentVolumeClaim(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) { + return helper.FetchObject(ctx, sup.PersistentVolumeClaim(), s.client, &corev1.PersistentVolumeClaim{}) +} + +func (s DiskService) GetPersistentVolume(ctx context.Context, pvc *corev1.PersistentVolumeClaim) (*corev1.PersistentVolume, error) { + if pvc == nil { + return nil, nil + } + + return helper.FetchObject(ctx, types.NamespacedName{Name: pvc.Spec.VolumeName}, s.client, &corev1.PersistentVolume{}) +} + +func (s DiskService) CheckStorageClass(ctx context.Context, storageClassName *string) error { + if storageClassName == nil || *storageClassName == "" { + return 
s.checkDefaultStorageClass(ctx) + } + + return s.checkStorageClass(ctx, *storageClassName) +} + +func (s DiskService) checkDefaultStorageClass(ctx context.Context) error { + var scs storev1.StorageClassList + err := s.client.List(ctx, &scs, &client.ListOptions{}) + if err != nil { + return err + } + + for _, sc := range scs.Items { + // TODO comment. + if sc.Annotations[common.AnnDefaultStorageClass] == "true" { + return nil + } + } + + return ErrDefaultStorageClassNotFound +} + +func (s DiskService) checkStorageClass(ctx context.Context, storageClassName string) error { + var sc storev1.StorageClass + err := s.client.Get(ctx, types.NamespacedName{ + Name: storageClassName, + }, &sc, &client.GetOptions{}) + if err != nil { + if k8serrors.IsNotFound(err) { + return ErrStorageClassNotFound + } + + return err + } + + return nil +} diff --git a/images/virtualization-artifact/pkg/controller/service/errors.go b/images/virtualization-artifact/pkg/controller/service/errors.go new file mode 100644 index 000000000..3672b3554 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/service/errors.go @@ -0,0 +1,26 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package service + +import "errors" + +var ErrTooSmallDiskSize = errors.New("virtual disk size is too small") + +var ( + ErrStorageClassNotFound = errors.New("storage class not found") + ErrDefaultStorageClassNotFound = errors.New("default storage class not found") +) diff --git a/images/virtualization-artifact/pkg/controller/service/importer_service.go b/images/virtualization-artifact/pkg/controller/service/importer_service.go index 806b599a7..b0cc7bdfc 100644 --- a/images/virtualization-artifact/pkg/controller/service/importer_service.go +++ b/images/virtualization-artifact/pkg/controller/service/importer_service.go @@ -73,10 +73,10 @@ func (s ImporterService) Start(ctx context.Context, settings *importer.Settings, } func (s ImporterService) CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) { - return s.CleanUpSubResources(ctx, sup) + return s.CleanUpSupplements(ctx, sup) } -func (s ImporterService) CleanUpSubResources(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (s ImporterService) CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) { pod, err := s.GetPod(ctx, sup) if err != nil { return false, err diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/attachee.go b/images/virtualization-artifact/pkg/controller/vd/internal/attachee.go new file mode 100644 index 000000000..96b4397a3 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/attachee.go @@ -0,0 +1,91 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "fmt" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +type AttacheeHandler struct { + client client.Client +} + +func NewAttacheeHandler(client client.Client) *AttacheeHandler { + return &AttacheeHandler{ + client: client, + } +} + +func (h AttacheeHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { + attachedVMs, err := h.getAttachedVM(ctx, vd) + if err != nil { + return reconcile.Result{}, err + } + + vd.Status.AttachedToVirtualMachines = nil + + for _, vm := range attachedVMs { + vd.Status.AttachedToVirtualMachines = append(vd.Status.AttachedToVirtualMachines, virtv2.AttachedVirtualMachine{ + Name: vm.Name, + }) + } + + if len(vd.Status.AttachedToVirtualMachines) > 0 { + controllerutil.AddFinalizer(vd, virtv2.FinalizerVMDProtection) + return reconcile.Result{}, nil + } + + controllerutil.RemoveFinalizer(vd, virtv2.FinalizerVMDProtection) + return reconcile.Result{}, nil +} + +func (h AttacheeHandler) getAttachedVM(ctx context.Context, vd client.Object) ([]virtv2.VirtualMachine, error) { + var vms virtv2.VirtualMachineList + err := h.client.List(ctx, &vms, &client.ListOptions{ + Namespace: vd.GetNamespace(), + }) + if err != nil { + return nil, fmt.Errorf("error getting virtual machines: %w", err) + } + + var attachedVMs []virtv2.VirtualMachine + + for _, vm := range vms.Items { + if h.isVDAttachedToVM(vd.GetName(), vm) { + attachedVMs = append(attachedVMs, vm) + } + } + + return attachedVMs, nil +} + +func (h AttacheeHandler) isVDAttachedToVM(vdName string, vm virtv2.VirtualMachine) bool { + for _, bda := range vm.Status.BlockDeviceRefs { + if bda.Kind == virtv2.DiskDevice && bda.Name == vdName 
{ + return true + } + } + + return false +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready.go b/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready.go new file mode 100644 index 000000000..e02097200 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready.go @@ -0,0 +1,98 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "errors" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/deckhouse/virtualization-controller/pkg/controller/service" + "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" +) + +type DatasourceReadyHandler struct { + sources Sources + blank source.Handler +} + +func NewDatasourceReadyHandler(blank source.Handler, sources Sources) *DatasourceReadyHandler { + return &DatasourceReadyHandler{ + blank: blank, + sources: sources, + } +} + +func (h DatasourceReadyHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { + condition, ok := service.GetCondition(vdcondition.DatasourceReadyType, vd.Status.Conditions) + if !ok { + condition = metav1.Condition{ + Type: vdcondition.DatasourceReadyType, + Status: metav1.ConditionUnknown, + } 
+ } + + defer func() { service.SetCondition(condition, &vd.Status.Conditions) }() + + if vd.DeletionTimestamp != nil { + condition.Status = metav1.ConditionUnknown + condition.Reason = "" + condition.Message = "" + return reconcile.Result{}, nil + } + + var ds source.Handler + if vd.Spec.DataSource == nil { + ds = h.blank + } else { + ds, ok = h.sources.Get(vd.Spec.DataSource.Type) + if !ok { + return reconcile.Result{}, fmt.Errorf("data source validator not found for type: %s", vd.Spec.DataSource.Type) + } + } + + err := ds.Validate(ctx, vd) + switch { + case err == nil: + condition.Status = metav1.ConditionTrue + condition.Reason = vdcondition.DatasourceReadyReason_DatasourceReady + condition.Message = "" + return reconcile.Result{}, nil + case errors.Is(err, source.ErrSecretNotFound): + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.DatasourceReadyReason_ContainerRegistrySecretNotFound + condition.Message = service.CapitalizeFirstLetter(err.Error()) + "." + return reconcile.Result{}, nil + case errors.As(err, &source.ImageNotReadyError{}): + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.DatasourceReadyReason_ImageNotReady + condition.Message = service.CapitalizeFirstLetter(err.Error()) + "." + return reconcile.Result{}, nil + case errors.As(err, &source.ClusterImageNotReadyError{}): + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.DatasourceReadyReason_ClusterImageNotReady + condition.Message = service.CapitalizeFirstLetter(err.Error()) + "." 
+ return reconcile.Result{}, nil + default: + return reconcile.Result{}, err + } +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready_test.go new file mode 100644 index 000000000..e4b152bae --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready_test.go @@ -0,0 +1,92 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" +) + +func TestDatasourceReadyHandler_Handle(t *testing.T) { + ctx := context.TODO() + blank := &HandlerMock{ + ValidateFunc: func(_ context.Context, _ *virtv2.VirtualDisk) error { + return nil + }, + } + sources := &SourcesMock{ + GetFunc: func(dsType virtv2.DataSourceType) (source.Handler, bool) { + return blank, true + }, + } + + t.Run("VirtualDisk with DeletionTimestamp", func(t *testing.T) { + vd := virtv2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{ + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + }, + } + + handler := NewDatasourceReadyHandler(nil, nil) + _, err := handler.Handle(ctx, &vd) + 
require.NoError(t, err) + + condition := vd.Status.Conditions[0] + require.Equal(t, vdcondition.DatasourceReadyType, condition.Type) + require.Equal(t, metav1.ConditionUnknown, condition.Status) + require.Equal(t, "", condition.Reason) + }) + + t.Run("VirtualDisk with Blank DataSource", func(t *testing.T) { + vd := virtv2.VirtualDisk{} + + handler := NewDatasourceReadyHandler(blank, nil) + _, err := handler.Handle(ctx, &vd) + require.NoError(t, err) + + condition := vd.Status.Conditions[0] + require.Equal(t, vdcondition.DatasourceReadyType, condition.Type) + require.Equal(t, metav1.ConditionTrue, condition.Status) + require.Equal(t, vdcondition.DatasourceReadyReason_DatasourceReady, condition.Reason) + }) + + t.Run("VirtualDisk with Non Blank DataSource", func(t *testing.T) { + vd := virtv2.VirtualDisk{ + Spec: virtv2.VirtualDiskSpec{ + DataSource: &virtv2.VirtualDiskDataSource{ + Type: "NonBlank", + }, + }, + } + + handler := NewDatasourceReadyHandler(nil, sources) + _, err := handler.Handle(ctx, &vd) + require.NoError(t, err) + + condition := vd.Status.Conditions[0] + require.Equal(t, vdcondition.DatasourceReadyType, condition.Type) + require.Equal(t, metav1.ConditionTrue, condition.Status) + require.Equal(t, vdcondition.DatasourceReadyReason_DatasourceReady, condition.Reason) + }) +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/deletion.go b/images/virtualization-artifact/pkg/controller/vd/internal/deletion.go new file mode 100644 index 000000000..060831fbf --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/deletion.go @@ -0,0 +1,56 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +type DeletionHandler struct { + sources *source.Sources +} + +func NewDeletionHandler(sources *source.Sources) *DeletionHandler { + return &DeletionHandler{ + sources: sources, + } +} + +func (h DeletionHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { + if vd.DeletionTimestamp != nil { + requeue, err := h.sources.CleanUp(ctx, vd) + if err != nil { + return reconcile.Result{}, err + } + + if requeue { + return reconcile.Result{Requeue: true}, nil + } + + controllerutil.RemoveFinalizer(vd, virtv2.FinalizerVDCleanup) + return reconcile.Result{}, nil + } + + controllerutil.AddFinalizer(vd, virtv2.FinalizerVDCleanup) + return reconcile.Result{}, nil +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/interfaces.go b/images/virtualization-artifact/pkg/controller/vd/internal/interfaces.go new file mode 100644 index 000000000..3f8a94c08 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/interfaces.go @@ -0,0 +1,30 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +//go:generate moq -rm -out mock.go . Handler Sources + +type Handler = source.Handler + +type Sources interface { + Get(dsType virtv2.DataSourceType) (source.Handler, bool) +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go new file mode 100644 index 000000000..a3ec3f2da --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go @@ -0,0 +1,107 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/deckhouse/virtualization-controller/pkg/controller/service" + "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" +) + +type LifeCycleHandler struct { + client client.Client + blank source.Handler + sources *source.Sources +} + +func NewLifeCycleHandler(blank source.Handler, sources *source.Sources, client client.Client) *LifeCycleHandler { + return &LifeCycleHandler{ + client: client, + blank: blank, + sources: sources, + } +} + +func (h LifeCycleHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { + readyCondition, ok := service.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) + if !ok { + readyCondition = metav1.Condition{ + Type: vdcondition.ReadyType, + Status: metav1.ConditionUnknown, + } + + service.SetCondition(readyCondition, &vd.Status.Conditions) + } + + if vd.DeletionTimestamp != nil { + vd.Status.Phase = virtv2.DiskTerminating + return reconcile.Result{}, nil + } + + if vd.Status.Phase == "" { + vd.Status.Phase = virtv2.DiskPending + } + + dataSourceReadyCondition, exists := service.GetCondition(vdcondition.DatasourceReadyType, vd.Status.Conditions) + if !exists { + return reconcile.Result{}, fmt.Errorf("condition %s not found, but required", vdcondition.DatasourceReadyType) + } + + if dataSourceReadyCondition.Status != metav1.ConditionTrue { + return reconcile.Result{}, nil + } + + if readyCondition.Status != metav1.ConditionTrue && readyCondition.Reason != vdcondition.ReadyReason_Lost && h.sources.Changed(ctx, vd) { + vd.Status = virtv2.VirtualDiskStatus{ + Phase: virtv2.DiskPending, + Conditions: vd.Status.Conditions, + 
ObservedGeneration: vd.Status.ObservedGeneration, + } + + _, err := h.sources.CleanUp(ctx, vd) + if err != nil { + return reconcile.Result{}, err + } + + return reconcile.Result{Requeue: true}, nil + } + + var ds source.Handler + if vd.Spec.DataSource == nil { + ds = h.blank + } else { + ds, exists = h.sources.Get(vd.Spec.DataSource.Type) + if !exists { + return reconcile.Result{}, fmt.Errorf("data source runner not found for type: %s", vd.Spec.DataSource.Type) + } + } + + requeue, err := ds.Sync(ctx, vd) + if err != nil { + return reconcile.Result{}, err + } + + return reconcile.Result{Requeue: requeue}, nil +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/mock.go b/images/virtualization-artifact/pkg/controller/vd/internal/mock.go new file mode 100644 index 000000000..941a9fd85 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/mock.go @@ -0,0 +1,249 @@ +// Code generated by moq; DO NOT EDIT. +// github.com/matryer/moq + +package internal + +import ( + "context" + "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "sync" +) + +// Ensure, that HandlerMock does implement Handler. +// If this is not the case, regenerate this file with moq. +var _ Handler = &HandlerMock{} + +// HandlerMock is a mock implementation of Handler. 
+// +// func TestSomethingThatUsesHandler(t *testing.T) { +// +// // make and configure a mocked Handler +// mockedHandler := &HandlerMock{ +// CleanUpFunc: func(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { +// panic("mock out the CleanUp method") +// }, +// SyncFunc: func(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { +// panic("mock out the Sync method") +// }, +// ValidateFunc: func(ctx context.Context, vd *virtv2.VirtualDisk) error { +// panic("mock out the Validate method") +// }, +// } +// +// // use mockedHandler in code that requires Handler +// // and then make assertions. +// +// } +type HandlerMock struct { + // CleanUpFunc mocks the CleanUp method. + CleanUpFunc func(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) + + // SyncFunc mocks the Sync method. + SyncFunc func(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) + + // ValidateFunc mocks the Validate method. + ValidateFunc func(ctx context.Context, vd *virtv2.VirtualDisk) error + + // calls tracks calls to the methods. + calls struct { + // CleanUp holds details about calls to the CleanUp method. + CleanUp []struct { + // Ctx is the ctx argument value. + Ctx context.Context + // Vd is the vd argument value. + Vd *virtv2.VirtualDisk + } + // Sync holds details about calls to the Sync method. + Sync []struct { + // Ctx is the ctx argument value. + Ctx context.Context + // Vd is the vd argument value. + Vd *virtv2.VirtualDisk + } + // Validate holds details about calls to the Validate method. + Validate []struct { + // Ctx is the ctx argument value. + Ctx context.Context + // Vd is the vd argument value. + Vd *virtv2.VirtualDisk + } + } + lockCleanUp sync.RWMutex + lockSync sync.RWMutex + lockValidate sync.RWMutex +} + +// CleanUp calls CleanUpFunc. 
+func (mock *HandlerMock) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + if mock.CleanUpFunc == nil { + panic("HandlerMock.CleanUpFunc: method is nil but Handler.CleanUp was just called") + } + callInfo := struct { + Ctx context.Context + Vd *virtv2.VirtualDisk + }{ + Ctx: ctx, + Vd: vd, + } + mock.lockCleanUp.Lock() + mock.calls.CleanUp = append(mock.calls.CleanUp, callInfo) + mock.lockCleanUp.Unlock() + return mock.CleanUpFunc(ctx, vd) +} + +// CleanUpCalls gets all the calls that were made to CleanUp. +// Check the length with: +// +// len(mockedHandler.CleanUpCalls()) +func (mock *HandlerMock) CleanUpCalls() []struct { + Ctx context.Context + Vd *virtv2.VirtualDisk +} { + var calls []struct { + Ctx context.Context + Vd *virtv2.VirtualDisk + } + mock.lockCleanUp.RLock() + calls = mock.calls.CleanUp + mock.lockCleanUp.RUnlock() + return calls +} + +// Sync calls SyncFunc. +func (mock *HandlerMock) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + if mock.SyncFunc == nil { + panic("HandlerMock.SyncFunc: method is nil but Handler.Sync was just called") + } + callInfo := struct { + Ctx context.Context + Vd *virtv2.VirtualDisk + }{ + Ctx: ctx, + Vd: vd, + } + mock.lockSync.Lock() + mock.calls.Sync = append(mock.calls.Sync, callInfo) + mock.lockSync.Unlock() + return mock.SyncFunc(ctx, vd) +} + +// SyncCalls gets all the calls that were made to Sync. +// Check the length with: +// +// len(mockedHandler.SyncCalls()) +func (mock *HandlerMock) SyncCalls() []struct { + Ctx context.Context + Vd *virtv2.VirtualDisk +} { + var calls []struct { + Ctx context.Context + Vd *virtv2.VirtualDisk + } + mock.lockSync.RLock() + calls = mock.calls.Sync + mock.lockSync.RUnlock() + return calls +} + +// Validate calls ValidateFunc. 
+func (mock *HandlerMock) Validate(ctx context.Context, vd *virtv2.VirtualDisk) error { + if mock.ValidateFunc == nil { + panic("HandlerMock.ValidateFunc: method is nil but Handler.Validate was just called") + } + callInfo := struct { + Ctx context.Context + Vd *virtv2.VirtualDisk + }{ + Ctx: ctx, + Vd: vd, + } + mock.lockValidate.Lock() + mock.calls.Validate = append(mock.calls.Validate, callInfo) + mock.lockValidate.Unlock() + return mock.ValidateFunc(ctx, vd) +} + +// ValidateCalls gets all the calls that were made to Validate. +// Check the length with: +// +// len(mockedHandler.ValidateCalls()) +func (mock *HandlerMock) ValidateCalls() []struct { + Ctx context.Context + Vd *virtv2.VirtualDisk +} { + var calls []struct { + Ctx context.Context + Vd *virtv2.VirtualDisk + } + mock.lockValidate.RLock() + calls = mock.calls.Validate + mock.lockValidate.RUnlock() + return calls +} + +// Ensure, that SourcesMock does implement Sources. +// If this is not the case, regenerate this file with moq. +var _ Sources = &SourcesMock{} + +// SourcesMock is a mock implementation of Sources. +// +// func TestSomethingThatUsesSources(t *testing.T) { +// +// // make and configure a mocked Sources +// mockedSources := &SourcesMock{ +// GetFunc: func(dsType virtv2.DataSourceType) (source.Handler, bool) { +// panic("mock out the Get method") +// }, +// } +// +// // use mockedSources in code that requires Sources +// // and then make assertions. +// +// } +type SourcesMock struct { + // GetFunc mocks the Get method. + GetFunc func(dsType virtv2.DataSourceType) (source.Handler, bool) + + // calls tracks calls to the methods. + calls struct { + // Get holds details about calls to the Get method. + Get []struct { + // DsType is the dsType argument value. + DsType virtv2.DataSourceType + } + } + lockGet sync.RWMutex +} + +// Get calls GetFunc. 
+func (mock *SourcesMock) Get(dsType virtv2.DataSourceType) (source.Handler, bool) { + if mock.GetFunc == nil { + panic("SourcesMock.GetFunc: method is nil but Sources.Get was just called") + } + callInfo := struct { + DsType virtv2.DataSourceType + }{ + DsType: dsType, + } + mock.lockGet.Lock() + mock.calls.Get = append(mock.calls.Get, callInfo) + mock.lockGet.Unlock() + return mock.GetFunc(dsType) +} + +// GetCalls gets all the calls that were made to Get. +// Check the length with: +// +// len(mockedSources.GetCalls()) +func (mock *SourcesMock) GetCalls() []struct { + DsType virtv2.DataSourceType +} { + var calls []struct { + DsType virtv2.DataSourceType + } + mock.lockGet.RLock() + calls = mock.calls.Get + mock.lockGet.RUnlock() + return calls +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/resizing.go b/images/virtualization-artifact/pkg/controller/vd/internal/resizing.go new file mode 100644 index 000000000..80c62def6 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/resizing.go @@ -0,0 +1,117 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "context" + "errors" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/deckhouse/virtualization-controller/pkg/controller/common" + "github.com/deckhouse/virtualization-controller/pkg/controller/service" + "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" +) + +type ResizingHandler struct { + diskService *service.DiskService +} + +func NewResizingHandler(diskService *service.DiskService) *ResizingHandler { + return &ResizingHandler{ + diskService: diskService, + } +} + +func (h ResizingHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { + condition, ok := service.GetCondition(vdcondition.ResizedType, vd.Status.Conditions) + if !ok { + condition = metav1.Condition{ + Type: vdcondition.ResizedType, + Status: metav1.ConditionUnknown, + } + } + + defer func() { service.SetCondition(condition, &vd.Status.Conditions) }() + + if vd.DeletionTimestamp != nil { + condition.Status = metav1.ConditionUnknown + condition.Reason = "" + condition.Message = "" + return reconcile.Result{}, nil + } + + newSize := vd.Spec.PersistentVolumeClaim.Size + if newSize == nil { + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ResizedReason_NotRequested + condition.Message = "" + return reconcile.Result{}, nil + } + + readyCondition, ok := service.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) + if !ok || readyCondition.Status != metav1.ConditionTrue { + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ResizedReason_NotRequested + condition.Message = "" + return reconcile.Result{}, nil + } + + supgen := supplements.NewGenerator(common.VDShortName, vd.Name, vd.Namespace, vd.UID) + pvc, err := 
h.diskService.GetPersistentVolumeClaim(ctx, supgen) + if err != nil { + return reconcile.Result{}, err + } + + if pvc == nil { + return reconcile.Result{}, errors.New("pvc not found for ready virtual disk") + } + + if newSize.Equal(pvc.Status.Capacity[corev1.ResourceStorage]) { + if condition.Reason == vdcondition.ResizedReason_InProgress { + condition.Status = metav1.ConditionTrue + condition.Reason = vdcondition.ResizedReason_Resized + condition.Message = "" + return reconcile.Result{}, nil + } + + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ResizedReason_NotRequested + condition.Message = "" + return reconcile.Result{}, nil + } + + err = h.diskService.Resize(ctx, pvc, *newSize, supgen) + switch { + case err == nil: + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ResizedReason_InProgress + condition.Message = "The virtual disk is in the process of resizing." + return reconcile.Result{}, nil + case errors.Is(err, service.ErrTooSmallDiskSize): + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ResizedReason_TooSmallDiskSize + condition.Message = "The new size of the virtual disk must not be smaller than the current size." + return reconcile.Result{}, nil + default: + return reconcile.Result{}, err + } +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/resizing_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/resizing_test.go new file mode 100644 index 000000000..348fa9545 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/resizing_test.go @@ -0,0 +1,128 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +// func TestResizedHandler_Handle(t *testing.T) { +// ctx := context.TODO() +// +// t.Run("VirtualDisk with DeletionTimestamp", func(t *testing.T) { +// vd := virtv2.VirtualDisk{ +// ObjectMeta: metav1.ObjectMeta{ +// DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, +// }, +// } +// +// handler := NewResizingHandler(nil) +// _, err := handler.Handle(ctx, &vd) +// require.NoError(t, err) +// +// condition := vd.Status.Conditions[0] +// require.Equal(t, vdcondition.ResizedType, condition.Type) +// require.Equal(t, metav1.ConditionUnknown, condition.Status) +// require.Equal(t, "", condition.Reason) +// }) +// +// t.Run("Resize VirtualDisk", func(t *testing.T) { +// vd := virtv2.VirtualDisk{ +// Spec: virtv2.VirtualDiskSpec{ +// PersistentVolumeClaim: virtv2.VirtualDiskPersistentVolumeClaim{ +// Size: resource.NewQuantity(1111, resource.BinarySI), +// }, +// }, +// Status: virtv2.VirtualDiskStatus{ +// Conditions: []metav1.Condition{ +// { +// Type: vdcondition.ReadyType, +// Status: metav1.ConditionTrue, +// }, +// }, +// }, +// } +// +// resizer := DiskMock{ +// GetPersistentVolumeClaimFunc: func(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) { +// return &corev1.PersistentVolumeClaim{ +// Status: corev1.PersistentVolumeClaimStatus{ +// Capacity: corev1.ResourceList{ +// corev1.ResourceStorage: *resource.NewQuantity(2222, resource.BinarySI), +// }, +// }, +// }, nil +// }, +// ResizeFunc: func(ctx context.Context, newSize resource.Quantity, sup *supplements.Generator) error { +// return nil +// }, 
+// } +// +// handler := NewResizedHandler(&resizer) +// err := handler.Handle(ctx, &vd) +// require.NoError(t, err) +// +// condition, ok := getCondition(vdcondition.ResizedType, vd.Status.Conditions) +// require.True(t, ok) +// require.Equal(t, vdcondition.ResizedType, condition.Type) +// require.Equal(t, metav1.ConditionFalse, condition.Status) +// require.Equal(t, vdcondition.ResizedReason_InProgress, condition.Reason) +// }) +// +// t.Run("VirtualDisk resized", func(t *testing.T) { +// size := resource.NewQuantity(1111, resource.BinarySI) +// +// vd := virtv2.VirtualDisk{ +// Spec: virtv2.VirtualDiskSpec{ +// PersistentVolumeClaim: virtv2.VirtualDiskPersistentVolumeClaim{ +// Size: size, +// }, +// }, +// Status: virtv2.VirtualDiskStatus{ +// Conditions: []metav1.Condition{ +// { +// Type: vdcondition.ReadyType, +// Status: metav1.ConditionTrue, +// }, +// { +// Type: vdcondition.ResizedType, +// Reason: vdcondition.ResizedReason_InProgress, +// Status: metav1.ConditionFalse, +// }, +// }, +// }, +// } +// +// resizer := DiskMock{ +// GetPersistentVolumeClaimFunc: func(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) { +// return &corev1.PersistentVolumeClaim{ +// Status: corev1.PersistentVolumeClaimStatus{ +// Capacity: corev1.ResourceList{ +// corev1.ResourceStorage: *size, +// }, +// }, +// }, nil +// }, +// } +// +// handler := NewResizedHandler(&resizer) +// err := handler.Handle(ctx, &vd) +// require.NoError(t, err) +// +// condition, ok := getCondition(vdcondition.ResizedType, vd.Status.Conditions) +// require.True(t, ok) +// require.Equal(t, vdcondition.ResizedType, condition.Type) +// require.Equal(t, metav1.ConditionTrue, condition.Status) +// require.Equal(t, "", condition.Reason) +// }) +// } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go new file mode 100644 index 000000000..e7d955d44 --- 
/dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go @@ -0,0 +1,219 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package source + +import ( + "context" + "errors" + "fmt" + "log/slog" + + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" + + "github.com/deckhouse/virtualization-controller/pkg/controller/common" + "github.com/deckhouse/virtualization-controller/pkg/controller/service" + "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" +) + +type BlankDataSource struct { + statService *service.StatService + diskService *service.DiskService + logger *slog.Logger +} + +func NewBlankDataSource( + statService *service.StatService, + diskService *service.DiskService, +) *BlankDataSource { + return &BlankDataSource{ + statService: statService, + diskService: diskService, + logger: slog.Default().With("controller", common.VDShortName, "ds", "blank"), + } +} + +func (ds BlankDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + ds.logger.Info("Sync", "vd", vd.Name) + + condition, _ := service.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) + defer func() { service.SetCondition(condition, &vd.Status.Conditions) 
}() + + supgen := supplements.NewGenerator(common.VDShortName, vd.Name, vd.Namespace, vd.UID) + dv, err := ds.diskService.GetDataVolume(ctx, supgen) + if err != nil { + return false, err + } + pvc, err := ds.diskService.GetPersistentVolumeClaim(ctx, supgen) + if err != nil { + return false, err + } + pv, err := ds.diskService.GetPersistentVolume(ctx, pvc) + if err != nil { + return false, err + } + + switch { + case isDiskProvisioningFinished(condition): + ds.logger.Info("Finishing...", "vd", vd.Name) + + switch { + case pvc == nil: + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Lost + condition.Message = fmt.Sprintf("PVC %s not found.", supgen.PersistentVolumeClaim().String()) + case pv == nil: + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Lost + condition.Message = fmt.Sprintf("PV %s not found.", pvc.Spec.VolumeName) + default: + condition.Status = metav1.ConditionTrue + condition.Reason = vdcondition.ReadyReason_Ready + condition.Message = "" + } + + // Protect Ready Disk and underlying PVC and PV. + err = ds.diskService.Protect(ctx, vd, nil, pvc, pv) + if err != nil { + return false, err + } + + err = ds.diskService.Unprotect(ctx, dv) + if err != nil { + return false, err + } + + return CleanUpSupplements(ctx, vd, ds) + case common.AnyTerminating(dv, pvc, pv): + ds.logger.Info("Cleaning up...", "vd", vd.Name) + case dv == nil: + var diskSize resource.Quantity + diskSize, err = ds.getPVCSize(vd) + if err != nil { + return false, err + } + + source := ds.getSource() + + err = ds.diskService.Start(ctx, diskSize, vd.Spec.PersistentVolumeClaim.StorageClass, source, vd, supgen) + if err != nil { + return false, err + } + + vd.Status.Phase = virtv2.DiskProvisioning + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Provisioning + condition.Message = "PVC Provisioner not found: create the new one." 
+ + vd.Status.Progress = "0%" + + ds.logger.Info("Create data volume...", "vd", vd.Name, "progress", vd.Status.Progress, "dv.phase", "nil") + + return true, nil + case common.IsDataVolumeComplete(dv): + vd.Status.Phase = virtv2.DiskReady + condition.Status = metav1.ConditionTrue + condition.Reason = vdcondition.ReadyReason_Ready + condition.Message = "" + + vd.Status.Progress = "100%" + vd.Status.Capacity = ds.diskService.GetCapacity(pvc) + vd.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName + + ds.logger.Info("Ready", "vd", vd.Name, "progress", vd.Status.Progress, "dv.phase", dv.Status.Phase) + default: + ds.logger.Info("Provisioning...", "vd", vd.Name, "progress", vd.Status.Progress, "dv.phase", dv.Status.Phase) + + vd.Status.Progress = ds.diskService.GetProgress(dv, vd.Status.Progress, service.NewScaleOption(0, 100)) + vd.Status.Capacity = ds.diskService.GetCapacity(pvc) + vd.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName + + err = ds.diskService.Protect(ctx, vd, dv, pvc, pv) + if err != nil { + return false, err + } + + err = ds.diskService.CheckStorageClass(ctx, vd.Spec.PersistentVolumeClaim.StorageClass) + switch { + case err == nil: + vd.Status.Phase = virtv2.DiskProvisioning + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Provisioning + condition.Message = "Import is in the process of provisioning to PVC." + return false, nil + case errors.Is(err, service.ErrStorageClassNotFound): + vd.Status.Phase = virtv2.DiskFailed + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningFailed + condition.Message = "Provided StorageClass not found in the cluster." 
+ return false, nil + case errors.Is(err, service.ErrDefaultStorageClassNotFound): + vd.Status.Phase = virtv2.DiskFailed + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningFailed + condition.Message = "Default StorageClass not found in the cluster: please provide a StorageClass name or set a default StorageClass." + default: + return false, err + } + } + + return true, nil +} + +func (ds BlankDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + supgen := supplements.NewGenerator(common.VDShortName, vd.Name, vd.Namespace, vd.UID) + + requeue, err := ds.diskService.CleanUp(ctx, supgen) + if err != nil { + return false, err + } + + return requeue, nil +} + +func (ds BlankDataSource) CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + supgen := supplements.NewGenerator(common.VDShortName, vd.Name, vd.Namespace, vd.UID) + + requeue, err := ds.diskService.CleanUpSupplements(ctx, supgen) + if err != nil { + return false, err + } + + return requeue, nil +} + +func (ds BlankDataSource) Validate(_ context.Context, _ *virtv2.VirtualDisk) error { + return nil +} + +func (ds BlankDataSource) getSource() *cdiv1.DataVolumeSource { + return &cdiv1.DataVolumeSource{ + Blank: &cdiv1.DataVolumeBlankImage{}, + } +} + +func (ds BlankDataSource) getPVCSize(vd *virtv2.VirtualDisk) (resource.Quantity, error) { + pvcSize := vd.Spec.PersistentVolumeClaim.Size + if pvcSize == nil || pvcSize.IsZero() { + return resource.Quantity{}, errors.New("spec.persistentVolumeClaim.size should be set for blank virtual disk") + } + + return *pvcSize, nil +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/errors.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/errors.go new file mode 100644 index 000000000..5ac9b5859 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/errors.go @@ -0,0 +1,55 @@ +/* +Copyright 2024 
Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package source + +import ( + "errors" + "fmt" +) + +var ( + ErrSecretNotFound = errors.New("container registry secret not found") + ErrPVCSizeSmallerImageVirtualSize = errors.New("persistentVolumeClaim size is smaller than image virtual size") +) + +type ImageNotReadyError struct { + name string +} + +func (e ImageNotReadyError) Error() string { + return fmt.Sprintf("Image %s not ready", e.name) +} + +func NewImageNotReadyError(name string) error { + return ImageNotReadyError{ + name: name, + } +} + +type ClusterImageNotReadyError struct { + name string +} + +func (e ClusterImageNotReadyError) Error() string { + return fmt.Sprintf("ClusterImage %s not ready", e.name) +} + +func NewClusterImageNotReadyError(name string) error { + return ClusterImageNotReadyError{ + name: name, + } +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go new file mode 100644 index 000000000..b17457b36 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go @@ -0,0 +1,364 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package source + +import ( + "context" + "errors" + "fmt" + "log/slog" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" + + cc "github.com/deckhouse/virtualization-controller/pkg/common" + "github.com/deckhouse/virtualization-controller/pkg/common/datasource" + vdutil "github.com/deckhouse/virtualization-controller/pkg/common/datavolume" + "github.com/deckhouse/virtualization-controller/pkg/controller/common" + "github.com/deckhouse/virtualization-controller/pkg/controller/importer" + "github.com/deckhouse/virtualization-controller/pkg/controller/service" + "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + "github.com/deckhouse/virtualization-controller/pkg/dvcr" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" +) + +type HTTPDataSource struct { + statService *service.StatService + importerService *service.ImporterService + diskService *service.DiskService + dvcrSettings *dvcr.Settings + logger *slog.Logger +} + +func NewHTTPDataSource( + statService *service.StatService, + importerService *service.ImporterService, + diskService *service.DiskService, + dvcrSettings *dvcr.Settings, +) *HTTPDataSource { + return &HTTPDataSource{ + statService: statService, + importerService: importerService, + diskService: diskService, + dvcrSettings: dvcrSettings, + logger: slog.Default().With("controller", 
common.VDShortName, "ds", "http"), + } +} + +func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + ds.logger.Info("Sync", "vd", vd.Name) + + condition, _ := service.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) + defer func() { service.SetCondition(condition, &vd.Status.Conditions) }() + + supgen := supplements.NewGenerator(common.VDShortName, vd.Name, vd.Namespace, vd.UID) + pod, err := ds.importerService.GetPod(ctx, supgen) + if err != nil { + return false, err + } + dv, err := ds.diskService.GetDataVolume(ctx, supgen) + if err != nil { + return false, err + } + pvc, err := ds.diskService.GetPersistentVolumeClaim(ctx, supgen) + if err != nil { + return false, err + } + pv, err := ds.diskService.GetPersistentVolume(ctx, pvc) + if err != nil { + return false, err + } + + switch { + case isDiskProvisioningFinished(condition): + ds.logger.Info("Finishing...", "vd", vd.Name) + + switch { + case pvc == nil: + vd.Status.Phase = virtv2.DiskLost + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Lost + condition.Message = fmt.Sprintf("PVC %s not found.", supgen.PersistentVolumeClaim().String()) + case pv == nil: + vd.Status.Phase = virtv2.DiskLost + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Lost + condition.Message = fmt.Sprintf("PV %s not found.", pvc.Spec.VolumeName) + default: + vd.Status.Phase = virtv2.DiskReady + condition.Status = metav1.ConditionTrue + condition.Reason = vdcondition.ReadyReason_Ready + condition.Message = "" + } + + // Protect Ready Disk and underlying PVC and PV. + err = ds.diskService.Protect(ctx, vd, nil, pvc, pv) + if err != nil { + return false, err + } + + // Unprotect import time supplements to delete them later. 
+ err = ds.importerService.Unprotect(ctx, pod) + if err != nil { + return false, err + } + + err = ds.diskService.Unprotect(ctx, dv) + if err != nil { + return false, err + } + + return CleanUpSupplements(ctx, vd, ds) + case common.AnyTerminating(pod, dv, pvc, pv): + ds.logger.Info("Cleaning up...", "vd", vd.Name) + case pod == nil: + vd.Status.Phase = virtv2.DiskProvisioning + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Provisioning + condition.Message = "DVCR Provisioner not found: create the new one." + + vd.Status.Progress = "0%" + + envSettings := ds.getEnvSettings(vd, supgen) + err = ds.importerService.Start(ctx, envSettings, vd, supgen, datasource.NewCABundleForVMD(vd.Spec.DataSource)) + if err != nil { + return false, err + } + + ds.logger.Info("Create importer pod...", "vd", vd.Name, "progress", vd.Status.Progress, "pod.phase", "nil") + case !common.IsPodComplete(pod): + err = ds.statService.CheckPod(pod) + if err != nil { + vd.Status.Phase = virtv2.DiskFailed + + switch { + case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled): + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningNotStarted + condition.Message = service.CapitalizeFirstLetter(err.Error() + ".") + return false, nil + case errors.Is(err, service.ErrProvisioningFailed): + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningFailed + condition.Message = service.CapitalizeFirstLetter(err.Error() + ".") + return false, nil + default: + return false, err + } + } + + err = ds.importerService.Protect(ctx, pod) + if err != nil { + return false, err + } + + vd.Status.Phase = virtv2.DiskProvisioning + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Provisioning + condition.Message = "Import is in the process of provisioning to DVCR." 
+ + vd.Status.Progress = ds.statService.GetProgress(vd.GetUID(), pod, vd.Status.Progress, service.NewScaleOption(0, 50)) + + ds.logger.Info("Provisioning...", "vd", vd.Name, "progress", vd.Status.Progress, "pod.phase", pod.Status.Phase) + case dv == nil: + err = ds.statService.CheckPod(pod) + if err != nil { + vd.Status.Phase = virtv2.DiskFailed + + switch { + case errors.Is(err, service.ErrProvisioningFailed): + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningFailed + condition.Message = service.CapitalizeFirstLetter(err.Error() + ".") + return false, nil + default: + return false, err + } + } + + var diskSize resource.Quantity + diskSize, err = ds.getPVCSize(vd, pod) + if err != nil { + return false, err + } + + source := ds.getSource(vd, supgen) + + err = ds.diskService.Start(ctx, diskSize, vd.Spec.PersistentVolumeClaim.StorageClass, source, vd, supgen) + if err != nil { + return false, err + } + + vd.Status.Phase = virtv2.DiskProvisioning + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Provisioning + condition.Message = "PVC Provisioner not found: create the new one." 
+ + vd.Status.Progress = "50%" + + ds.logger.Info("Create data volume...", "vd", vd.Name, "progress", vd.Status.Progress, "dv.phase", "nil") + + return true, nil + case common.IsDataVolumeComplete(dv): + vd.Status.Phase = virtv2.DiskReady + condition.Status = metav1.ConditionTrue + condition.Reason = vdcondition.ReadyReason_Ready + condition.Message = "" + + vd.Status.Progress = "100%" + vd.Status.Capacity = ds.diskService.GetCapacity(pvc) + vd.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName + + ds.logger.Info("Ready", "vd", vd.Name, "progress", vd.Status.Progress, "dv.phase", dv.Status.Phase) + default: + ds.logger.Info("Provisioning...", "vd", vd.Name, "progress", vd.Status.Progress, "dv.phase", dv.Status.Phase) + + vd.Status.Progress = ds.diskService.GetProgress(dv, vd.Status.Progress, service.NewScaleOption(50, 100)) + vd.Status.Capacity = ds.diskService.GetCapacity(pvc) + vd.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName + + err = ds.diskService.Protect(ctx, vd, dv, pvc, pv) + if err != nil { + return false, err + } + + err = ds.diskService.CheckStorageClass(ctx, vd.Spec.PersistentVolumeClaim.StorageClass) + switch { + case err == nil: + vd.Status.Phase = virtv2.DiskProvisioning + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Provisioning + condition.Message = "Import is in the process of provisioning to PVC." + return false, nil + case errors.Is(err, service.ErrStorageClassNotFound): + vd.Status.Phase = virtv2.DiskFailed + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningFailed + condition.Message = "Provided StorageClass not found in the cluster." 
+ return false, nil + case errors.Is(err, service.ErrDefaultStorageClassNotFound): + vd.Status.Phase = virtv2.DiskFailed + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningFailed + condition.Message = "Default StorageClass not found in the cluster: please provide a StorageClass name or set a default StorageClass." + return false, nil + default: + return false, err + } + } + + return true, nil +} + +func (ds HTTPDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + supgen := supplements.NewGenerator(common.VDShortName, vd.Name, vd.Namespace, vd.UID) + + importerRequeue, err := ds.importerService.CleanUp(ctx, supgen) + if err != nil { + return false, err + } + + diskRequeue, err := ds.diskService.CleanUp(ctx, supgen) + if err != nil { + return false, err + } + + return importerRequeue || diskRequeue, nil +} + +func (ds HTTPDataSource) Validate(_ context.Context, _ *virtv2.VirtualDisk) error { + return nil +} + +func (ds HTTPDataSource) CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + supgen := supplements.NewGenerator(common.VDShortName, vd.Name, vd.Namespace, vd.UID) + + importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen) + if err != nil { + return false, err + } + + diskRequeue, err := ds.diskService.CleanUpSupplements(ctx, supgen) + if err != nil { + return false, err + } + + return importerRequeue || diskRequeue, nil +} + +func (ds HTTPDataSource) getEnvSettings(vd *virtv2.VirtualDisk, supgen *supplements.Generator) *importer.Settings { + var settings importer.Settings + + importer.ApplyHTTPSourceSettings(&settings, vd.Spec.DataSource.HTTP, supgen) + importer.ApplyDVCRDestinationSettings( + &settings, + ds.dvcrSettings, + supgen, + ds.dvcrSettings.RegistryImageForVMD(vd.Name, vd.Namespace), + ) + + return &settings +} + +func (ds HTTPDataSource) getSource(vd *virtv2.VirtualDisk, sup *supplements.Generator) *cdiv1.DataVolumeSource { 
+ // The image was preloaded from source into dvcr. + // We can't use the same data source a second time, but we can set dvcr as the data source. + // Use DV name for the Secret with DVCR auth and the ConfigMap with DVCR CA Bundle. + dvcrSourceImageName := ds.dvcrSettings.RegistryImageForVMD(vd.Name, vd.Namespace) + + url := cc.DockerRegistrySchemePrefix + dvcrSourceImageName + secretName := sup.DVCRAuthSecretForDV().Name + certConfigMapName := sup.DVCRCABundleConfigMapForDV().Name + + return &cdiv1.DataVolumeSource{ + Registry: &cdiv1.DataVolumeSourceRegistry{ + URL: &url, + SecretRef: &secretName, + CertConfigMap: &certConfigMapName, + }, + } +} + +func (ds HTTPDataSource) getPVCSize(vd *virtv2.VirtualDisk, pod *corev1.Pod) (resource.Quantity, error) { + // Get size from the importer Pod to detect if specified PVC size is enough. + unpackedSize, err := resource.ParseQuantity(ds.statService.GetSize(pod).UnpackedBytes) + if err != nil { + return resource.Quantity{}, err + } + + if unpackedSize.IsZero() { + return resource.Quantity{}, errors.New("got zero unpacked size from data source") + } + + pvcSize := vd.Spec.PersistentVolumeClaim.Size + if pvcSize != nil && !pvcSize.IsZero() && pvcSize.Cmp(unpackedSize) == -1 { + return resource.Quantity{}, ErrPVCSizeSmallerImageVirtualSize + } + + // Adjust PVC size to feat image onto scratch PVC. + // TODO(future): remove size adjusting after get rid of scratch. 
+ adjustedSize := vdutil.AdjustPVCSize(unpackedSize) + + if pvcSize != nil && pvcSize.Cmp(adjustedSize) == 1 { + return *pvcSize, nil + } + + return adjustedSize, nil +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref.go new file mode 100644 index 000000000..d02d810c5 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref.go @@ -0,0 +1,288 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package source + +import ( + "context" + "errors" + "fmt" + "log/slog" + + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + + common2 "github.com/deckhouse/virtualization-controller/pkg/common" + vdutil "github.com/deckhouse/virtualization-controller/pkg/common/datavolume" + "github.com/deckhouse/virtualization-controller/pkg/controller" + "github.com/deckhouse/virtualization-controller/pkg/controller/common" + "github.com/deckhouse/virtualization-controller/pkg/controller/service" + "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" +) + +type ObjectRefDataSource struct { + statService *service.StatService + diskService *service.DiskService + client client.Client + logger *slog.Logger +} + +func NewObjectRefDataSource( + statService *service.StatService, + diskService *service.DiskService, + client client.Client, +) *ObjectRefDataSource { + return &ObjectRefDataSource{ + statService: statService, + diskService: diskService, + client: client, + logger: slog.Default().With("controller", common.VDShortName, "ds", "objectref"), + } +} + +func (ds ObjectRefDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + ds.logger.Info("Sync", "vd", vd.Name) + + condition, _ := service.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) + defer func() { service.SetCondition(condition, &vd.Status.Conditions) }() + + supgen := supplements.NewGenerator(common.VDShortName, vd.Name, vd.Namespace, vd.UID) + dv, err := ds.diskService.GetDataVolume(ctx, supgen) + if err != nil { + return false, err + } + pvc, err := ds.diskService.GetPersistentVolumeClaim(ctx, supgen) + if err != nil { + return false, err + } + pv, err := 
ds.diskService.GetPersistentVolume(ctx, pvc) + if err != nil { + return false, err + } + + switch { + case isDiskProvisioningFinished(condition): + ds.logger.Info("Finishing...", "vd", vd.Name) + + switch { + case pvc == nil: + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Lost + condition.Message = fmt.Sprintf("PVC %s not found.", supgen.PersistentVolumeClaim().String()) + case pv == nil: + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Lost + condition.Message = fmt.Sprintf("PV %s not found.", pvc.Spec.VolumeName) + default: + condition.Status = metav1.ConditionTrue + condition.Reason = vdcondition.ReadyReason_Ready + condition.Message = "" + } + + // Protect Ready Disk and underlying PVC and PV. + err = ds.diskService.Protect(ctx, vd, nil, pvc, pv) + if err != nil { + return false, err + } + + err = ds.diskService.Unprotect(ctx, dv) + if err != nil { + return false, err + } + + return CleanUpSupplements(ctx, vd, ds) + case common.AnyTerminating(dv, pvc, pv): + vd.Status.Phase = virtv2.DiskPending + + ds.logger.Info("Cleaning up...", "vd", vd.Name) + case dv == nil: + var diskSize resource.Quantity + diskSize, err = ds.getPVCSize(ctx, vd) + if err != nil { + return false, err + } + + var source *cdiv1.DataVolumeSource + source, err = ds.getSource(ctx, vd, supgen) + if err != nil { + return false, err + } + + err = ds.diskService.Start(ctx, diskSize, vd.Spec.PersistentVolumeClaim.StorageClass, source, vd, supgen) + if err != nil { + return false, err + } + + vd.Status.Phase = virtv2.DiskProvisioning + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Provisioning + condition.Message = "PVC Provisioner not found: create the new one." 
+ + vd.Status.Progress = "0%" + + ds.logger.Info("Create data volume...", "vd", vd.Name, "progress", vd.Status.Progress, "dv.phase", "nil") + + return true, nil + case common.IsDataVolumeComplete(dv): + vd.Status.Phase = virtv2.DiskReady + condition.Status = metav1.ConditionTrue + condition.Reason = vdcondition.ReadyReason_Ready + condition.Message = "" + + vd.Status.Progress = "100%" + vd.Status.Capacity = ds.diskService.GetCapacity(pvc) + vd.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName + + ds.logger.Info("Ready", "vd", vd.Name, "progress", vd.Status.Progress, "dv.phase", dv.Status.Phase) + default: + ds.logger.Info("Provisioning...", "vd", vd.Name, "progress", vd.Status.Progress, "dv.phase", dv.Status.Phase) + + vd.Status.Progress = ds.diskService.GetProgress(dv, vd.Status.Progress, service.NewScaleOption(0, 100)) + vd.Status.Capacity = ds.diskService.GetCapacity(pvc) + vd.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName + + err = ds.diskService.Protect(ctx, vd, dv, pvc, pv) + if err != nil { + return false, err + } + + err = ds.diskService.CheckStorageClass(ctx, vd.Spec.PersistentVolumeClaim.StorageClass) + switch { + case err == nil: + vd.Status.Phase = virtv2.DiskProvisioning + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Provisioning + condition.Message = "Import is in the process of provisioning to PVC." + return false, nil + case errors.Is(err, service.ErrStorageClassNotFound): + vd.Status.Phase = virtv2.DiskFailed + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningFailed + condition.Message = "Provided StorageClass not found in the cluster." 
+ return false, nil + case errors.Is(err, service.ErrDefaultStorageClassNotFound): + vd.Status.Phase = virtv2.DiskFailed + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningFailed + condition.Message = "Default StorageClass not found in the cluster: please provide a StorageClass name or set a default StorageClass." + return false, nil + default: + return false, err + } + } + + return true, nil +} + +func (ds ObjectRefDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + supgen := supplements.NewGenerator(common.VDShortName, vd.Name, vd.Namespace, vd.UID) + + requeue, err := ds.diskService.CleanUp(ctx, supgen) + if err != nil { + return false, err + } + + return requeue, nil +} + +func (ds ObjectRefDataSource) CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + supgen := supplements.NewGenerator(common.VDShortName, vd.Name, vd.Namespace, vd.UID) + + requeue, err := ds.diskService.CleanUpSupplements(ctx, supgen) + if err != nil { + return false, err + } + + return requeue, nil +} + +func (ds ObjectRefDataSource) Validate(ctx context.Context, vd *virtv2.VirtualDisk) error { + if vd.Spec.DataSource == nil || vd.Spec.DataSource.ObjectRef == nil { + return errors.New("object ref missed for data source") + } + + dvcrDataSource, err := controller.NewDVCRDataSourcesForVMD(ctx, vd.Spec.DataSource, vd, ds.client) + if err != nil { + return err + } + + if dvcrDataSource.IsReady() { + return nil + } + + switch vd.Spec.DataSource.ObjectRef.Kind { + case virtv2.VirtualDiskObjectRefKindVirtualImage: + return NewImageNotReadyError(vd.Spec.DataSource.ObjectRef.Name) + case virtv2.VirtualDiskObjectRefKindClusterVirtualImage: + return NewClusterImageNotReadyError(vd.Spec.DataSource.ObjectRef.Name) + default: + return fmt.Errorf("unexpected object ref kind: %s", vd.Spec.DataSource.ObjectRef.Kind) + } +} + +func (ds ObjectRefDataSource) getSource(ctx context.Context, vd 
*virtv2.VirtualDisk, sup *supplements.Generator) (*cdiv1.DataVolumeSource, error) { + dvcrDataSource, err := controller.NewDVCRDataSourcesForVMD(ctx, vd.Spec.DataSource, vd, ds.client) + if err != nil { + return nil, err + } + + url := common2.DockerRegistrySchemePrefix + dvcrDataSource.GetTarget() + secretName := sup.DVCRAuthSecretForDV().Name + certConfigMapName := sup.DVCRCABundleConfigMapForDV().Name + + return &cdiv1.DataVolumeSource{ + Registry: &cdiv1.DataVolumeSourceRegistry{ + URL: &url, + SecretRef: &secretName, + CertConfigMap: &certConfigMapName, + }, + }, nil +} + +func (ds ObjectRefDataSource) getPVCSize(ctx context.Context, vd *virtv2.VirtualDisk) (resource.Quantity, error) { + dvcrDataSource, err := controller.NewDVCRDataSourcesForVMD(ctx, vd.Spec.DataSource, vd, ds.client) + if err != nil { + return resource.Quantity{}, err + } + + unpackedSize, err := resource.ParseQuantity(dvcrDataSource.GetSize().UnpackedBytes) + if err != nil { + return resource.Quantity{}, err + } + + if unpackedSize.IsZero() { + return resource.Quantity{}, errors.New("got zero unpacked size from data source") + } + + pvcSize := vd.Spec.PersistentVolumeClaim.Size + if pvcSize != nil && !pvcSize.IsZero() && pvcSize.Cmp(unpackedSize) == -1 { + return resource.Quantity{}, ErrPVCSizeSmallerImageVirtualSize + } + + // Adjust PVC size to feat image onto scratch PVC. + // TODO(future): remove size adjusting after get rid of scratch. 
+ adjustedSize := vdutil.AdjustPVCSize(unpackedSize) + + if pvcSize != nil && pvcSize.Cmp(adjustedSize) == 1 { + return *pvcSize, nil + } + + return adjustedSize, nil +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go new file mode 100644 index 000000000..2daf8c5e1 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go @@ -0,0 +1,388 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package source + +import ( + "context" + "errors" + "fmt" + "log/slog" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + + cc "github.com/deckhouse/virtualization-controller/pkg/common" + "github.com/deckhouse/virtualization-controller/pkg/common/datasource" + vdutil "github.com/deckhouse/virtualization-controller/pkg/common/datavolume" + "github.com/deckhouse/virtualization-controller/pkg/controller/common" + "github.com/deckhouse/virtualization-controller/pkg/controller/importer" + "github.com/deckhouse/virtualization-controller/pkg/controller/service" + "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + "github.com/deckhouse/virtualization-controller/pkg/dvcr" + "github.com/deckhouse/virtualization-controller/pkg/sdk/framework/helper" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" +) + +type RegistryDataSource struct { + statService *service.StatService + importerService *service.ImporterService + diskService *service.DiskService + dvcrSettings *dvcr.Settings + client client.Client + logger *slog.Logger +} + +func NewRegistryDataSource( + statService *service.StatService, + importerService *service.ImporterService, + diskService *service.DiskService, + dvcrSettings *dvcr.Settings, + client client.Client, +) *RegistryDataSource { + return &RegistryDataSource{ + statService: statService, + importerService: importerService, + diskService: diskService, + dvcrSettings: dvcrSettings, + client: client, + logger: slog.Default().With("controller", common.VDShortName, "ds", "registry"), + } +} + +func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + ds.logger.Info("Sync", "vd", vd.Name) + 
+ condition, _ := service.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) + defer func() { service.SetCondition(condition, &vd.Status.Conditions) }() + + supgen := supplements.NewGenerator(common.VDShortName, vd.Name, vd.Namespace, vd.UID) + pod, err := ds.importerService.GetPod(ctx, supgen) + if err != nil { + return false, err + } + dv, err := ds.diskService.GetDataVolume(ctx, supgen) + if err != nil { + return false, err + } + pvc, err := ds.diskService.GetPersistentVolumeClaim(ctx, supgen) + if err != nil { + return false, err + } + pv, err := ds.diskService.GetPersistentVolume(ctx, pvc) + if err != nil { + return false, err + } + + switch { + case isDiskProvisioningFinished(condition): + ds.logger.Info("Finishing...", "vd", vd.Name) + + switch { + case pvc == nil: + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Lost + condition.Message = fmt.Sprintf("PVC %s not found.", supgen.PersistentVolumeClaim().String()) + case pv == nil: + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Lost + condition.Message = fmt.Sprintf("PV %s not found.", pvc.Spec.VolumeName) + default: + condition.Status = metav1.ConditionTrue + condition.Reason = vdcondition.ReadyReason_Ready + condition.Message = "" + } + + // Protect Ready Disk and underlying PVC and PV. + err = ds.diskService.Protect(ctx, vd, nil, pvc, pv) + if err != nil { + return false, err + } + + // Unprotect import time supplements to delete them later. 
+ err = ds.importerService.Unprotect(ctx, pod) + if err != nil { + return false, err + } + + err = ds.diskService.Unprotect(ctx, dv) + if err != nil { + return false, err + } + + return CleanUpSupplements(ctx, vd, ds) + case common.AnyTerminating(pod, dv, pvc): + vd.Status.Phase = virtv2.DiskPending + + ds.logger.Info("Cleaning up...", "vd", vd.Name) + case pod == nil: + envSettings := ds.getEnvSettings(vd, supgen) + err = ds.importerService.Start(ctx, envSettings, vd, supgen, datasource.NewCABundleForVMD(vd.Spec.DataSource)) + if err != nil { + return false, err + } + + vd.Status.Phase = virtv2.DiskProvisioning + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Provisioning + condition.Message = "DVCR Provisioner not found: create the new one." + + vd.Status.Progress = "0%" + + ds.logger.Info("Create importer pod...", "vd", vd.Name, "progress", vd.Status.Progress, "pod.phase", "nil") + case !common.IsPodComplete(pod): + err = ds.statService.CheckPod(pod) + if err != nil { + vd.Status.Phase = virtv2.DiskFailed + + switch { + case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled): + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningNotStarted + condition.Message = service.CapitalizeFirstLetter(err.Error() + ".") + return false, nil + case errors.Is(err, service.ErrProvisioningFailed): + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningFailed + condition.Message = service.CapitalizeFirstLetter(err.Error() + ".") + return false, nil + default: + return false, err + } + } + + vd.Status.Phase = virtv2.DiskProvisioning + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Provisioning + condition.Message = "Import is in the process of provisioning to DVCR." 
+ + vd.Status.Progress = ds.statService.GetProgress(vd.GetUID(), pod, vd.Status.Progress, service.NewScaleOption(0, 50)) + + err = ds.importerService.Protect(ctx, pod) + if err != nil { + return false, err + } + + ds.logger.Info("Provisioning...", "vd", vd.Name, "progress", vd.Status.Progress, "pod.phase", pod.Status.Phase) + case dv == nil: + err = ds.statService.CheckPod(pod) + if err != nil { + vd.Status.Phase = virtv2.DiskFailed + + switch { + case errors.Is(err, service.ErrProvisioningFailed): + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningFailed + condition.Message = service.CapitalizeFirstLetter(err.Error() + ".") + return false, nil + default: + return false, err + } + } + + var diskSize resource.Quantity + diskSize, err = ds.getPVCSize(vd, pod) + if err != nil { + return false, err + } + + source := ds.getSource(vd, supgen) + + err = ds.diskService.Start(ctx, diskSize, vd.Spec.PersistentVolumeClaim.StorageClass, source, vd, supgen) + if err != nil { + return false, err + } + + vd.Status.Phase = virtv2.DiskProvisioning + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Provisioning + condition.Message = "PVC Provisioner not found: create the new one." 
+ + vd.Status.Progress = "50%" + + ds.logger.Info("Create data volume...", "vd", vd.Name, "progress", vd.Status.Progress, "dv.phase", "nil") + + return true, nil + case common.IsDataVolumeComplete(dv): + vd.Status.Phase = virtv2.DiskReady + condition.Status = metav1.ConditionTrue + condition.Reason = vdcondition.ReadyReason_Ready + condition.Message = "" + + vd.Status.Progress = "100%" + vd.Status.Capacity = ds.diskService.GetCapacity(pvc) + vd.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName + + ds.logger.Info("Ready", "vd", vd.Name, "progress", vd.Status.Progress, "dv.phase", dv.Status.Phase) + default: + ds.logger.Info("Provisioning...", "vd", vd.Name, "progress", vd.Status.Progress, "dv.phase", dv.Status.Phase) + + vd.Status.Progress = ds.diskService.GetProgress(dv, vd.Status.Progress, service.NewScaleOption(50, 100)) + vd.Status.Capacity = ds.diskService.GetCapacity(pvc) + vd.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName + + err = ds.diskService.Protect(ctx, vd, dv, pvc, pv) + if err != nil { + return false, err + } + + err = ds.diskService.CheckStorageClass(ctx, vd.Spec.PersistentVolumeClaim.StorageClass) + switch { + case err == nil: + vd.Status.Phase = virtv2.DiskProvisioning + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Provisioning + condition.Message = "Import is in the process of provisioning to PVC." + return false, nil + case errors.Is(err, service.ErrStorageClassNotFound): + vd.Status.Phase = virtv2.DiskFailed + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningFailed + condition.Message = "Provided StorageClass not found in the cluster." 
+ return false, nil + case errors.Is(err, service.ErrDefaultStorageClassNotFound): + vd.Status.Phase = virtv2.DiskFailed + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningFailed + condition.Message = "Default StorageClass not found in the cluster: please provide a StorageClass name or set a default StorageClass." + return false, nil + default: + return false, err + } + } + + return true, nil +} + +func (ds RegistryDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + supgen := supplements.NewGenerator(common.VDShortName, vd.Name, vd.Namespace, vd.UID) + + importerRequeue, err := ds.importerService.CleanUp(ctx, supgen) + if err != nil { + return false, err + } + + diskRequeue, err := ds.diskService.CleanUp(ctx, supgen) + if err != nil { + return false, err + } + + return importerRequeue || diskRequeue, nil +} + +func (ds RegistryDataSource) CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + supgen := supplements.NewGenerator(common.VDShortName, vd.Name, vd.Namespace, vd.UID) + + importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen) + if err != nil { + return false, err + } + + diskRequeue, err := ds.diskService.CleanUpSupplements(ctx, supgen) + if err != nil { + return false, err + } + + return importerRequeue || diskRequeue, nil +} + +func (ds RegistryDataSource) Validate(ctx context.Context, vd *virtv2.VirtualDisk) error { + if vd.Spec.DataSource == nil || vd.Spec.DataSource.ContainerImage == nil { + return errors.New("container image missed for data source") + } + + if vd.Spec.DataSource.ContainerImage.ImagePullSecret.Name != "" { + secretName := types.NamespacedName{ + Namespace: vd.Spec.DataSource.ContainerImage.ImagePullSecret.Namespace, + Name: vd.Spec.DataSource.ContainerImage.ImagePullSecret.Name, + } + secret, err := helper.FetchObject[*corev1.Secret](ctx, secretName, ds.client, &corev1.Secret{}) + if err != nil { + return 
fmt.Errorf("failed to get secret %s: %w", secretName, err) + } + + if secret == nil { + return ErrSecretNotFound + } + } + + return nil +} + +func (ds RegistryDataSource) getEnvSettings(vd *virtv2.VirtualDisk, supgen *supplements.Generator) *importer.Settings { + var settings importer.Settings + + importer.ApplyRegistrySourceSettings(&settings, vd.Spec.DataSource.ContainerImage, supgen) + importer.ApplyDVCRDestinationSettings( + &settings, + ds.dvcrSettings, + supgen, + ds.dvcrSettings.RegistryImageForVMD(vd.Name, vd.Namespace), + ) + + return &settings +} + +func (ds RegistryDataSource) getSource(vd *virtv2.VirtualDisk, sup *supplements.Generator) *cdiv1.DataVolumeSource { + // The image was preloaded from source into dvcr. + // We can't use the same data source a second time, but we can set dvcr as the data source. + // Use DV name for the Secret with DVCR auth and the ConfigMap with DVCR CA Bundle. + dvcrSourceImageName := ds.dvcrSettings.RegistryImageForVMD(vd.Name, vd.Namespace) + + url := cc.DockerRegistrySchemePrefix + dvcrSourceImageName + secretName := sup.DVCRAuthSecretForDV().Name + certConfigMapName := sup.DVCRCABundleConfigMapForDV().Name + + return &cdiv1.DataVolumeSource{ + Registry: &cdiv1.DataVolumeSourceRegistry{ + URL: &url, + SecretRef: &secretName, + CertConfigMap: &certConfigMapName, + }, + } +} + +func (ds RegistryDataSource) getPVCSize(vd *virtv2.VirtualDisk, pod *corev1.Pod) (resource.Quantity, error) { + // Get size from the importer Pod to detect if specified PVC size is enough. 
+ unpackedSize, err := resource.ParseQuantity(ds.statService.GetSize(pod).UnpackedBytes) + if err != nil { + return resource.Quantity{}, err + } + + if unpackedSize.IsZero() { + return resource.Quantity{}, errors.New("got zero unpacked size from data source") + } + + pvcSize := vd.Spec.PersistentVolumeClaim.Size + if pvcSize != nil && !pvcSize.IsZero() && pvcSize.Cmp(unpackedSize) == -1 { + return resource.Quantity{}, ErrPVCSizeSmallerImageVirtualSize + } + + // Adjust PVC size to feat image onto scratch PVC. + // TODO(future): remove size adjusting after get rid of scratch. + adjustedSize := vdutil.AdjustPVCSize(unpackedSize) + + if pvcSize != nil && pvcSize.Cmp(adjustedSize) == 1 { + return *pvcSize, nil + } + + return adjustedSize, nil +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go new file mode 100644 index 000000000..e86c1f0e3 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go @@ -0,0 +1,91 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package source + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + cc "github.com/deckhouse/virtualization-controller/pkg/controller/common" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" +) + +type Handler interface { + Sync(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) + CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) + Validate(ctx context.Context, vd *virtv2.VirtualDisk) error +} + +type Sources struct { + sources map[virtv2.DataSourceType]Handler +} + +func NewSources() *Sources { + return &Sources{ + sources: make(map[virtv2.DataSourceType]Handler), + } +} + +func (s Sources) Set(dsType virtv2.DataSourceType, h Handler) { + s.sources[dsType] = h +} + +func (s Sources) Get(dsType virtv2.DataSourceType) (Handler, bool) { + source, ok := s.sources[dsType] + return source, ok +} + +func (s Sources) Changed(_ context.Context, vd *virtv2.VirtualDisk) bool { + if vd.Generation == 1 { + return false + } + + return vd.Generation != vd.Status.ObservedGeneration +} + +func (s Sources) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + var requeue bool + + for _, source := range s.sources { + ok, err := source.CleanUp(ctx, vd) + if err != nil { + return false, err + } + + requeue = requeue || ok + } + + return requeue, nil +} + +type Cleaner interface { + CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) +} + +func CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk, c Cleaner) (bool, error) { + if cc.ShouldCleanupSubResources(vd) { + return c.CleanUpSupplements(ctx, vd) + } + + return false, nil +} + +func isDiskProvisioningFinished(c metav1.Condition) bool { + return c.Reason == vdcondition.ReadyReason_Ready || c.Reason == vdcondition.ReadyReason_Lost +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go 
b/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go new file mode 100644 index 000000000..657a7f5be --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go @@ -0,0 +1,390 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package source + +import ( + "context" + "errors" + "fmt" + "log/slog" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" + + common2 "github.com/deckhouse/virtualization-controller/pkg/common" + "github.com/deckhouse/virtualization-controller/pkg/common/datasource" + vdutil "github.com/deckhouse/virtualization-controller/pkg/common/datavolume" + "github.com/deckhouse/virtualization-controller/pkg/controller/common" + "github.com/deckhouse/virtualization-controller/pkg/controller/service" + "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + "github.com/deckhouse/virtualization-controller/pkg/controller/uploader" + "github.com/deckhouse/virtualization-controller/pkg/dvcr" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" +) + +type UploadDataSource struct { + statService *service.StatService + uploaderService *service.UploaderService + diskService *service.DiskService + dvcrSettings *dvcr.Settings + logger *slog.Logger 
+} + +func NewUploadDataSource( + statService *service.StatService, + uploaderService *service.UploaderService, + diskService *service.DiskService, + dvcrSettings *dvcr.Settings, +) *UploadDataSource { + return &UploadDataSource{ + statService: statService, + uploaderService: uploaderService, + diskService: diskService, + dvcrSettings: dvcrSettings, + logger: slog.Default().With("controller", common.VDShortName, "ds", "upload"), + } +} + +func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + ds.logger.Info("Sync", "vd", vd.Name) + + condition, _ := service.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) + defer func() { service.SetCondition(condition, &vd.Status.Conditions) }() + + supgen := supplements.NewGenerator(common.VDShortName, vd.Name, vd.Namespace, vd.UID) + pod, err := ds.uploaderService.GetPod(ctx, supgen) + if err != nil { + return false, err + } + svc, err := ds.uploaderService.GetService(ctx, supgen) + if err != nil { + return false, err + } + ing, err := ds.uploaderService.GetIngress(ctx, supgen) + if err != nil { + return false, err + } + dv, err := ds.diskService.GetDataVolume(ctx, supgen) + if err != nil { + return false, err + } + pvc, err := ds.diskService.GetPersistentVolumeClaim(ctx, supgen) + if err != nil { + return false, err + } + pv, err := ds.diskService.GetPersistentVolume(ctx, pvc) + if err != nil { + return false, err + } + + if vd.Status.UploadCommand == "" { + if ing != nil && ing.Annotations[common.AnnUploadURL] != "" { + vd.Status.UploadCommand = fmt.Sprintf("curl %s -T example.iso", ing.Annotations[common.AnnUploadURL]) + } + } + + switch { + case isDiskProvisioningFinished(condition): + ds.logger.Info("Finishing...", "vd", vd.Name) + + switch { + case pvc == nil: + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Lost + condition.Message = fmt.Sprintf("PVC %s not found.", supgen.PersistentVolumeClaim().String()) + case pv == nil: + 
condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Lost + condition.Message = fmt.Sprintf("PV %s not found.", pvc.Spec.VolumeName) + default: + condition.Status = metav1.ConditionTrue + condition.Reason = vdcondition.ReadyReason_Ready + condition.Message = "" + } + + // Protect Ready Disk and underlying PVC and PV. + err = ds.diskService.Protect(ctx, vd, nil, pvc, pv) + if err != nil { + return false, err + } + + // Unprotect upload time supplements to delete them later. + err = ds.uploaderService.Unprotect(ctx, pod, svc, ing) + if err != nil { + return false, err + } + + err = ds.diskService.Unprotect(ctx, dv) + if err != nil { + return false, err + } + + return CleanUpSupplements(ctx, vd, ds) + case common.AnyTerminating(pod, svc, ing, dv, pvc): + vd.Status.Phase = virtv2.DiskPending + condition.Status = metav1.ConditionUnknown + condition.Reason = "" + condition.Message = "" + + ds.logger.Info("Cleaning up...", "vd", vd.Name) + case pod == nil && svc == nil && ing == nil: + envSettings := ds.getEnvSettings(supgen) + err = ds.uploaderService.Start(ctx, envSettings, vd, supgen, datasource.NewCABundleForVMD(vd.Spec.DataSource)) + if err != nil { + return false, err + } + + vd.Status.Phase = virtv2.DiskPending + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Provisioning + condition.Message = "DVCR Provisioner not found: create the new one." 
+ + vd.Status.Progress = "0%" + + ds.logger.Info("Create uploader pod...", "vd", vd.Name, "progress", vd.Status.Progress, "pod.phase", nil) + case !common.IsPodComplete(pod): + err = ds.statService.CheckPod(pod) + if err != nil { + vd.Status.Phase = virtv2.DiskFailed + + switch { + case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled): + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningNotStarted + condition.Message = service.CapitalizeFirstLetter(err.Error() + ".") + return false, nil + case errors.Is(err, service.ErrProvisioningFailed): + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningFailed + condition.Message = service.CapitalizeFirstLetter(err.Error() + ".") + return false, nil + default: + return false, err + } + } + + if !ds.statService.IsUploadStarted(vd.GetUID(), pod) { + vd.Status.Phase = virtv2.DiskWaitForUserUpload + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_WaitForUserUpload + condition.Message = "Waiting for the user upload." + + ds.logger.Info("WaitForUserUpload...", "vd", vd.Name, "progress", vd.Status.Progress, "pod.phase", pod.Status.Phase) + + return false, nil + } + + vd.Status.Phase = virtv2.DiskProvisioning + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Provisioning + condition.Message = "Import is in the process of provisioning to DVCR." 
+ + vd.Status.Progress = ds.statService.GetProgress(vd.GetUID(), pod, vd.Status.Progress, service.NewScaleOption(0, 50)) + + err = ds.uploaderService.Protect(ctx, pod, svc, ing) + if err != nil { + return false, err + } + + ds.logger.Info("Provisioning...", "vd", vd.Name, "progress", vd.Status.Progress, "pod.phase", pod.Status.Phase) + case dv == nil: + err = ds.statService.CheckPod(pod) + if err != nil { + vd.Status.Phase = virtv2.DiskFailed + + switch { + case errors.Is(err, service.ErrProvisioningFailed): + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningFailed + condition.Message = service.CapitalizeFirstLetter(err.Error() + ".") + return false, nil + default: + return false, err + } + } + + var diskSize resource.Quantity + diskSize, err = ds.getPVCSize(vd, pod) + if err != nil { + return false, err + } + + source := ds.getSource(vd, supgen) + + err = ds.diskService.Start(ctx, diskSize, vd.Spec.PersistentVolumeClaim.StorageClass, source, vd, supgen) + if err != nil { + return false, err + } + + vd.Status.Phase = virtv2.DiskProvisioning + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Provisioning + condition.Message = "PVC Provisioner not found: create the new one." 
+ + vd.Status.Progress = "50%" + + ds.logger.Info("Create data volume...", "vd", vd.Name, "progress", vd.Status.Progress, "dv.phase", "nil") + + return true, nil + case common.IsDataVolumeComplete(dv): + vd.Status.Phase = virtv2.DiskReady + condition.Status = metav1.ConditionTrue + condition.Reason = vdcondition.ReadyReason_Ready + condition.Message = "" + + vd.Status.Progress = "100%" + vd.Status.Capacity = ds.diskService.GetCapacity(pvc) + vd.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName + + ds.logger.Info("Ready", "vd", vd.Name, "progress", vd.Status.Progress, "dv.phase", dv.Status.Phase) + default: + ds.logger.Info("Provisioning...", "vd", vd.Name, "progress", vd.Status.Progress, "dv.phase", dv.Status.Phase) + + vd.Status.Progress = ds.diskService.GetProgress(dv, vd.Status.Progress, service.NewScaleOption(50, 100)) + vd.Status.Capacity = ds.diskService.GetCapacity(pvc) + vd.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName + + err = ds.diskService.Protect(ctx, vd, dv, pvc, pv) + if err != nil { + return false, err + } + + err = ds.diskService.CheckStorageClass(ctx, vd.Spec.PersistentVolumeClaim.StorageClass) + switch { + case err == nil: + vd.Status.Phase = virtv2.DiskProvisioning + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_Provisioning + condition.Message = "Import is in the process of provisioning to PVC." + return false, nil + case errors.Is(err, service.ErrStorageClassNotFound): + vd.Status.Phase = virtv2.DiskFailed + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningFailed + condition.Message = "Provided StorageClass not found in the cluster." 
+ return false, nil + case errors.Is(err, service.ErrDefaultStorageClassNotFound): + vd.Status.Phase = virtv2.DiskFailed + condition.Status = metav1.ConditionFalse + condition.Reason = vdcondition.ReadyReason_ProvisioningFailed + condition.Message = "Default StorageClass not found in the cluster: please provide a StorageClass name or set a default StorageClass." + return false, nil + default: + return false, err + } + } + + return true, nil +} + +func (ds UploadDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + supgen := supplements.NewGenerator(common.VDShortName, vd.Name, vd.Namespace, vd.UID) + + uploaderRequeue, err := ds.uploaderService.CleanUp(ctx, supgen) + if err != nil { + return false, err + } + + diskRequeue, err := ds.diskService.CleanUp(ctx, supgen) + if err != nil { + return false, err + } + + return uploaderRequeue || diskRequeue, nil +} + +func (ds UploadDataSource) CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + supgen := supplements.NewGenerator(common.VDShortName, vd.Name, vd.Namespace, vd.UID) + + uploaderRequeue, err := ds.uploaderService.CleanUpSupplements(ctx, supgen) + if err != nil { + return false, err + } + + diskRequeue, err := ds.diskService.CleanUpSupplements(ctx, supgen) + if err != nil { + return false, err + } + + return uploaderRequeue || diskRequeue, nil +} + +func (ds UploadDataSource) Validate(_ context.Context, _ *virtv2.VirtualDisk) error { + return nil +} + +func (ds UploadDataSource) getEnvSettings(supgen *supplements.Generator) *uploader.Settings { + var settings uploader.Settings + + uploader.ApplyDVCRDestinationSettings( + &settings, + ds.dvcrSettings, + supgen, + ds.dvcrSettings.RegistryImageForVMD(supgen.Name, supgen.Namespace), + ) + + return &settings +} + +func (ds UploadDataSource) getSource(vd *virtv2.VirtualDisk, sup *supplements.Generator) *cdiv1.DataVolumeSource { + // The image was preloaded from source into dvcr. 
+ // We can't use the same data source a second time, but we can set dvcr as the data source.
+ // Use DV name for the Secret with DVCR auth and the ConfigMap with DVCR CA Bundle.
+ dvcrSourceImageName := ds.dvcrSettings.RegistryImageForVMD(vd.Name, vd.Namespace)
+
+ url := common2.DockerRegistrySchemePrefix + dvcrSourceImageName
+ secretName := sup.DVCRAuthSecretForDV().Name
+ certConfigMapName := sup.DVCRCABundleConfigMapForDV().Name
+
+ return &cdiv1.DataVolumeSource{
+ Registry: &cdiv1.DataVolumeSourceRegistry{
+ URL: &url,
+ SecretRef: &secretName,
+ CertConfigMap: &certConfigMapName,
+ },
+ }
+}
+
+func (ds UploadDataSource) getPVCSize(vd *virtv2.VirtualDisk, pod *corev1.Pod) (resource.Quantity, error) {
+ // Get size from the importer Pod to detect if specified PVC size is enough.
+ unpackedSize, err := resource.ParseQuantity(ds.statService.GetSize(pod).UnpackedBytes)
+ if err != nil {
+ return resource.Quantity{}, err
+ }
+
+ if unpackedSize.IsZero() {
+ return resource.Quantity{}, errors.New("got zero unpacked size from data source")
+ }
+
+ pvcSize := vd.Spec.PersistentVolumeClaim.Size
+ if pvcSize != nil && !pvcSize.IsZero() && pvcSize.Cmp(unpackedSize) == -1 {
+ return resource.Quantity{}, ErrPVCSizeSmallerImageVirtualSize
+ }
+
+ // Adjust PVC size to fit image onto scratch PVC.
+ // TODO(future): remove size adjusting after getting rid of scratch. 
+ adjustedSize := vdutil.AdjustPVCSize(unpackedSize) + + if pvcSize != nil && pvcSize.Cmp(adjustedSize) == 1 { + return *pvcSize, nil + } + + return adjustedSize, nil +} diff --git a/images/virtualization-artifact/pkg/controller/vd/vd_controller.go b/images/virtualization-artifact/pkg/controller/vd/vd_controller.go new file mode 100644 index 000000000..827002ad8 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/vd_controller.go @@ -0,0 +1,97 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vd + +import ( + "context" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/deckhouse/virtualization-controller/pkg/controller/service" + "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal" + "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" + "github.com/deckhouse/virtualization-controller/pkg/dvcr" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +const ( + ControllerName = "vd-controller" + + PodVerbose = "3" + PodPullPolicy = string(corev1.PullIfNotPresent) +) + +type Condition interface { + Handle(ctx context.Context, vd *virtv2.VirtualDisk) error +} + +func NewController( + ctx context.Context, + mgr manager.Manager, + log logr.Logger, + importerImage string, + uploaderImage string, + dvcr *dvcr.Settings, +) (controller.Controller, error) { + stat := service.NewStatService() + protection := service.NewProtectionService(mgr.GetClient(), virtv2.FinalizerVirtualDiskProtection) + importer := service.NewImporterService(dvcr, mgr.GetClient(), importerImage, PodPullPolicy, PodVerbose, ControllerName, protection) + uploader := service.NewUploaderService(dvcr, mgr.GetClient(), uploaderImage, PodPullPolicy, PodVerbose, ControllerName, protection) + disk := service.NewDiskService(mgr.GetClient(), dvcr, protection) + + blank := source.NewBlankDataSource(stat, disk) + + sources := source.NewSources() + sources.Set(virtv2.DataSourceTypeHTTP, source.NewHTTPDataSource(stat, importer, disk, dvcr)) + sources.Set(virtv2.DataSourceTypeContainerImage, source.NewRegistryDataSource(stat, importer, disk, dvcr, mgr.GetClient())) + sources.Set(virtv2.DataSourceTypeObjectRef, source.NewObjectRefDataSource(stat, disk, mgr.GetClient())) + sources.Set(virtv2.DataSourceTypeUpload, source.NewUploadDataSource(stat, uploader, disk, 
dvcr)) + + reconciler := NewReconciler( + mgr.GetClient(), + internal.NewDatasourceReadyHandler(blank, sources), + internal.NewLifeCycleHandler(blank, sources, mgr.GetClient()), + internal.NewResizingHandler(disk), + internal.NewDeletionHandler(sources), + internal.NewAttacheeHandler(mgr.GetClient()), + ) + + vdController, err := controller.New(ControllerName, mgr, controller.Options{Reconciler: reconciler}) + if err != nil { + return nil, err + } + + err = reconciler.SetupController(ctx, mgr, vdController) + if err != nil { + return nil, err + } + + if err = builder.WebhookManagedBy(mgr). + For(&virtv2.VirtualDisk{}). + WithValidator(NewValidator()). + Complete(); err != nil { + return nil, err + } + + log.Info("Initialized VirtualDisk controller", "image", importerImage) + + return vdController, nil +} diff --git a/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go b/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go new file mode 100644 index 000000000..a81715df7 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go @@ -0,0 +1,252 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vd + +import ( + "context" + "errors" + "fmt" + "log/slog" + "reflect" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/deckhouse/virtualization-controller/pkg/controller/service" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +type Handler interface { + Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) +} + +type Reconciler struct { + handlers []Handler + client client.Client +} + +func NewReconciler(client client.Client, handlers ...Handler) *Reconciler { + return &Reconciler{ + client: client, + handlers: handlers, + } +} + +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + vd := service.NewResource(req.NamespacedName, r.client, r.factory, r.statusGetter) + + err := vd.Fetch(ctx) + if err != nil { + return reconcile.Result{}, err + } + + if vd.IsEmpty() { + return reconcile.Result{}, nil + } + + var requeue bool + + slog.Info("Start") + + var handlerErrs []error + + for _, h := range r.handlers { + var res reconcile.Result + slog.Info("Handle... " + reflect.TypeOf(h).Elem().Name()) + res, err = h.Handle(ctx, vd.Changed()) + if err != nil { + slog.Error("Failed to handle vd", "err", err, "handler", reflect.TypeOf(h).Elem().Name()) + handlerErrs = append(handlerErrs, err) + } + + // TODO: merger. 
+ requeue = requeue || res.Requeue + } + + vd.Changed().Status.ObservedGeneration = vd.Changed().Generation + + slog.Info("Update") + + err = vd.Update(ctx) + if err != nil { + return reconcile.Result{}, err + } + + err = errors.Join(handlerErrs...) + if err != nil { + return reconcile.Result{}, err + } + + if requeue { + slog.Info("Requeue") + return reconcile.Result{ + RequeueAfter: 5 * time.Second, + }, nil + } + + slog.Info("Done") + + return reconcile.Result{}, nil +} + +func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr controller.Controller) error { + if err := ctr.Watch( + source.Kind(mgr.GetCache(), &virtv2.VirtualDisk{}), + &handler.EnqueueRequestForObject{}, + predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { return true }, + DeleteFunc: func(e event.DeleteEvent) bool { return true }, + UpdateFunc: func(e event.UpdateEvent) bool { + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + }, + }, + ); err != nil { + return fmt.Errorf("error setting watch on VirtualDisk: %w", err) + } + + if err := ctr.Watch( + source.Kind(mgr.GetCache(), &cdiv1.DataVolume{}), + handler.EnqueueRequestForOwner( + mgr.GetScheme(), + mgr.GetRESTMapper(), + &virtv2.VirtualDisk{}, + handler.OnlyControllerOwner(), + ), + predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { return false }, + DeleteFunc: func(e event.DeleteEvent) bool { return false }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldDV, ok := e.ObjectOld.(*cdiv1.DataVolume) + if !ok { + return false + } + newDV, ok := e.ObjectNew.(*cdiv1.DataVolume) + if !ok { + return false + } + + return oldDV.Status.Progress != newDV.Status.Progress + }, + }, + ); err != nil { + return fmt.Errorf("error setting watch on DV: %w", err) + } + + if err := ctr.Watch( + source.Kind(mgr.GetCache(), &corev1.PersistentVolumeClaim{}), + handler.EnqueueRequestForOwner( + mgr.GetScheme(), + mgr.GetRESTMapper(), + &virtv2.VirtualDisk{}, + handler.OnlyControllerOwner(), + 
), predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { return false }, + DeleteFunc: func(e event.DeleteEvent) bool { return true }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldPVC, ok := e.ObjectOld.(*corev1.PersistentVolumeClaim) + if !ok { + return false + } + newPVC, ok := e.ObjectNew.(*corev1.PersistentVolumeClaim) + if !ok { + return false + } + + return oldPVC.Status.Capacity[corev1.ResourceStorage] != newPVC.Status.Capacity[corev1.ResourceStorage] + }, + }, + ); err != nil { + return fmt.Errorf("error setting watch on PVC: %w", err) + } + + if err := ctr.Watch( + source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{}), + handler.EnqueueRequestsFromMapFunc(r.enqueueDisksAttachedToVM()), + predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return r.vmHasAttachedDisks(e.Object) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return r.vmHasAttachedDisks(e.Object) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return r.vmHasAttachedDisks(e.ObjectOld) || r.vmHasAttachedDisks(e.ObjectNew) + }, + }, + ); err != nil { + return fmt.Errorf("error setting watch on VMs: %w", err) + } + + return nil +} + +func (r *Reconciler) factory() *virtv2.VirtualDisk { + return &virtv2.VirtualDisk{} +} + +func (r *Reconciler) statusGetter(obj *virtv2.VirtualDisk) virtv2.VirtualDiskStatus { + return obj.Status +} + +func (r *Reconciler) enqueueDisksAttachedToVM() handler.MapFunc { + return func(_ context.Context, obj client.Object) []reconcile.Request { + vm, ok := obj.(*virtv2.VirtualMachine) + if !ok { + return nil + } + + var requests []reconcile.Request + + for _, bda := range vm.Status.BlockDeviceRefs { + if bda.Kind != virtv2.DiskDevice { + continue + } + + requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{ + Name: bda.Name, + Namespace: vm.Namespace, + }}) + } + + return requests + } +} + +func (r *Reconciler) vmHasAttachedDisks(obj client.Object) bool { + vm, ok := obj.(*virtv2.VirtualMachine) + if 
!ok { + return false + } + + for _, bda := range vm.Status.BlockDeviceRefs { + if bda.Kind == virtv2.DiskDevice { + return true + } + } + + return false +} diff --git a/images/virtualization-artifact/pkg/controller/vd/vd_webhook.go b/images/virtualization-artifact/pkg/controller/vd/vd_webhook.go new file mode 100644 index 000000000..55205d49b --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/vd_webhook.go @@ -0,0 +1,123 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package vd
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+ "reflect"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+ "github.com/deckhouse/virtualization-controller/pkg/controller/common"
+ "github.com/deckhouse/virtualization-controller/pkg/controller/service"
+ virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+ "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
+)
+
+type Validator struct {
+ logger *slog.Logger
+}
+
+func NewValidator() *Validator {
+ return &Validator{
+ logger: slog.Default().With("controller", common.VDShortName, "webhook", "validator"),
+ }
+}
+
+func (v *Validator) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+ vd, ok := obj.(*virtv2.VirtualDisk)
+ if !ok {
+ return nil, fmt.Errorf("expected a new VirtualDisk but got a %T", obj)
+ }
+
+ v.logger.Info("Validating virtual disk", "spec.pvc.size", vd.Spec.PersistentVolumeClaim.Size)
+
+ if vd.Spec.PersistentVolumeClaim.Size != nil && vd.Spec.PersistentVolumeClaim.Size.IsZero() {
+ return nil, fmt.Errorf("virtual machine disk size must be greater than 0")
+ }
+
+ if vd.Spec.DataSource == nil && (vd.Spec.PersistentVolumeClaim.Size == nil || vd.Spec.PersistentVolumeClaim.Size.IsZero()) {
+ return nil, fmt.Errorf("if the data source is not specified, it's necessary to set spec.PersistentVolumeClaim.size to create blank virtual disk")
+ }
+
+ return nil, nil
+}
+
+func (v *Validator) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
+ newVD, ok := newObj.(*virtv2.VirtualDisk)
+ if !ok {
+ return nil, fmt.Errorf("expected a new VirtualDisk but got a %T", newObj)
+ }
+
+ oldVD, ok := oldObj.(*virtv2.VirtualDisk)
+ if !ok {
+ return nil, fmt.Errorf("expected an old VirtualDisk but got a %T", oldObj)
+ }
+
+ v.logger.Info("Validating virtual disk",
+ "old.spec.pvc.size", oldVD.Spec.PersistentVolumeClaim.Size,
+ "new.spec.pvc.size", newVD.Spec.PersistentVolumeClaim.Size,
+ )
+
+ if newVD.Spec.PersistentVolumeClaim.Size == oldVD.Spec.PersistentVolumeClaim.Size {
+ return nil, nil
+ }
+
+ if newVD.Spec.PersistentVolumeClaim.Size == nil {
+ return nil, errors.New("spec.persistentVolumeClaim.size cannot be omitted once set")
+ }
+
+ if newVD.Spec.PersistentVolumeClaim.Size.IsZero() {
+ return nil, fmt.Errorf("virtual machine disk size must be greater than 0")
+ }
+
+ if oldVD.Spec.PersistentVolumeClaim.Size != nil && newVD.Spec.PersistentVolumeClaim.Size.Cmp(*oldVD.Spec.PersistentVolumeClaim.Size) == -1 {
+ return nil, fmt.Errorf(
+ "spec.persistentVolumeClaim.size value (%s) should be greater than or equal to the current value (%s)",
+ newVD.Spec.PersistentVolumeClaim.Size.String(),
+ oldVD.Spec.PersistentVolumeClaim.Size.String(),
+ )
+ }
+
+ if oldVD.Generation == newVD.Generation {
+ return nil, nil
+ }
+
+ ready, _ := service.GetCondition(vdcondition.ReadyType, newVD.Status.Conditions)
+ if newVD.Status.Phase == virtv2.DiskReady || newVD.Status.Phase == virtv2.DiskLost || ready.Status == metav1.ConditionTrue {
+ if !reflect.DeepEqual(oldVD.Spec.DataSource, newVD.Spec.DataSource) {
+ return nil, fmt.Errorf("VirtualDisk has already been created: data source cannot be changed after disk is created")
+ }
+
+ if !reflect.DeepEqual(oldVD.Spec.PersistentVolumeClaim.StorageClass, newVD.Spec.PersistentVolumeClaim.StorageClass) {
+ return nil, fmt.Errorf("VirtualDisk has already been created: storage class cannot be changed after disk is created")
+ }
+ }
+
+ return nil, nil
+}
+
+func (v *Validator) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
+ err := fmt.Errorf("misconfigured webhook rules: delete operation not implemented")
+ v.logger.Error("Ensure the correctness of ValidatingWebhookConfiguration", "err", err)
+ return nil, nil
+}