diff --git a/common/k8stest/test.go b/common/k8stest/test.go
index ea8db0f..f20f4e6 100644
--- a/common/k8stest/test.go
+++ b/common/k8stest/test.go
@@ -174,6 +174,7 @@ func ResourceCheck(waitForPools bool) error {
 	} else {
 		if msvs != nil {
 			if len(msvs) != 0 {
+				logf.Log.Info("ResourceCheck: found", "msvs", msvs)
 				errs.Accumulate(fmt.Errorf("found MayastorVolumes"))
 			}
 		} else {
@@ -311,3 +312,83 @@ func ResourceK8sCheck() error {
 
 	return errs.GetError()
 }
+
+// VerifyMayastorResourceUsageIsZero returns true if mayastor resource usage is 0;
+// if resource values cannot be established it returns false.
+func VerifyMayastorResourceUsageIsZero() bool {
+	var errs = common.ErrorAccumulator{}
+	var mspUsage, poolUsage uint64
+	allPoolsOnline := false
+	msvCount := -1
+	nexusCount := -1
+	nvmeControllerCount := -1
+	replicaCount := -1
+
+	msvs, err := ListMsvs()
+	if err != nil {
+		errs.Accumulate(err)
+	} else {
+		if msvs != nil {
+			msvCount = len(msvs)
+		} else {
+			logf.Log.Info("Listing MSVs returned nil array")
+		}
+	}
+
+	err = custom_resources.CheckAllMsPoolsAreOnline()
+	if err != nil {
+		errs.Accumulate(err)
+	} else {
+		allPoolsOnline = true
+	}
+
+	mspUsage, err = getMspUsage()
+	if err != nil {
+		errs.Accumulate(err)
+	}
+
+	// gRPC calls can only be executed successfully if the e2e-agent daemonSet has been deployed successfully.
+	if mayastorclient.CanConnect() {
+		// check pools
+		{
+			poolUsage, err = GetPoolUsageInCluster()
+			if err != nil {
+				errs.Accumulate(err)
+			}
+		}
+		// check nexuses
+		{
+			nexuses, err := ListNexusesInCluster()
+			if err != nil {
+				errs.Accumulate(err)
+			} else {
+				nexusCount = len(nexuses)
+			}
+		}
+		// check replicas
+		{
+			replicas, err := ListReplicasInCluster()
+			if err != nil {
+				errs.Accumulate(err)
+			} else {
+				replicaCount = len(replicas)
+			}
+		}
+		// check nvmeControllers
+		{
+			nvmeControllers, err := ListNvmeControllersInCluster()
+			if err != nil {
+				errs.Accumulate(err)
+			} else {
+				nvmeControllerCount = len(nvmeControllers)
+			}
+		}
+	} else {
+		errs.Accumulate(fmt.Errorf("gRPC calls to mayastor are not enabled, not all checks can be run"))
+	}
+	if errs.GetError() != nil {
+		logf.Log.Info("VerifyMayastorResourceUsageIsZero:", "errors", errs.GetError())
+	}
+	logf.Log.Info("VerifyMayastorResourceUsageIsZero:", "allPoolsOnline", allPoolsOnline, "mspUsage", mspUsage, "poolUsage", poolUsage, "msvCount", msvCount, "nexusCount", nexusCount, "nvmeControllerCount", nvmeControllerCount, "replicaCount", replicaCount)
+	return allPoolsOnline && mspUsage == 0 && poolUsage == 0 && msvCount == 0 && nexusCount == 0 && nvmeControllerCount == 0 && replicaCount == 0 && errs.GetError() == nil
+}
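A minimal sketch of how a test teardown might consume the new helper: resource usage can take time to drain to zero after volumes are deleted, so a caller would typically poll rather than assert once. The import path, the helper name waitForZeroResourceUsage, and the poll interval are illustrative assumptions, not part of this change.

    // Hypothetical caller, for illustration only.
    package example

    import (
        "time"

        "mayastor-e2e/common/k8stest" // assumed import path
    )

    // waitForZeroResourceUsage polls VerifyMayastorResourceUsageIsZero until it
    // reports zero usage or timeoutSecs elapses.
    func waitForZeroResourceUsage(timeoutSecs int) bool {
        const pollSecs = 5
        for elapsed := 0; elapsed < timeoutSecs; elapsed += pollSecs {
            if k8stest.VerifyMayastorResourceUsageIsZero() {
                return true
            }
            time.Sleep(pollSecs * time.Second)
        }
        return false
    }
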
diff --git a/common/k8stest/util_pvc.go b/common/k8stest/util_pvc.go
index c13cb76..c2cec21 100644
--- a/common/k8stest/util_pvc.go
+++ b/common/k8stest/util_pvc.go
@@ -540,12 +540,17 @@ func CheckForPVs() (bool, error) {
 	return foundResources, err
 }
 
+// CreatePvc creates a PVC and, on success, writes its UID to uuid; intended to run as a goroutine, it signals wg when done.
 func CreatePvc(createOpts *coreV1.PersistentVolumeClaim, errBuf *error, uuid *string, wg *sync.WaitGroup) {
 	// Create the PVC.
 	pvc, err := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims(createOpts.ObjectMeta.Namespace).Create(context.TODO(), createOpts, metaV1.CreateOptions{})
 	*errBuf = err
-	if pvc != nil {
-		*uuid = string(pvc.UID)
+	if err == nil {
+		if pvc != nil {
+			*uuid = string(pvc.UID)
+		} else {
+			*errBuf = fmt.Errorf("pvc create returned nil pvc")
+		}
 	}
 	wg.Done()
 }
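CreatePvc is shaped for fan-out: each call takes per-call errBuf and uuid slots plus a shared WaitGroup, so a batch of PVCs can be created concurrently. A sketch under assumptions (the import paths and the pre-built createOptsList are illustrative, not part of this change):

    // Hypothetical fan-out, for illustration only.
    package example

    import (
        "sync"

        coreV1 "k8s.io/api/core/v1"

        "mayastor-e2e/common/k8stest" // assumed import path
    )

    // createPvcsConcurrently launches one CreatePvc goroutine per PVC spec and
    // waits for all of them, returning the per-PVC UIDs and errors.
    func createPvcsConcurrently(createOptsList []*coreV1.PersistentVolumeClaim) ([]string, []error) {
        var wg sync.WaitGroup
        errBufs := make([]error, len(createOptsList))
        uuids := make([]string, len(createOptsList))
        for ix, createOpts := range createOptsList {
            wg.Add(1)
            // Each goroutine writes only to its own slice slot, so no extra locking is needed.
            go k8stest.CreatePvc(createOpts, &errBufs[ix], &uuids[ix], &wg)
        }
        wg.Wait() // CreatePvc calls wg.Done() on every path
        return uuids, errBufs
    }
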
diff --git a/common/k8stest/util_testpods.go b/common/k8stest/util_testpods.go
index 283ef4f..14ae5d6 100644
--- a/common/k8stest/util_testpods.go
+++ b/common/k8stest/util_testpods.go
@@ -358,7 +358,7 @@ var reCompileOnce sync.Once
 var reFioLog *regexp.Regexp = nil
 var reFioCritical *regexp.Regexp = nil
 
-func ScanFioPodLogs(pod v1.Pod, synopsisIn *common.E2eFioPodLogSynopsis) *common.E2eFioPodLogSynopsis {
+func ScanFioPodLogs(pod v1.Pod, synopsisIn *common.E2eFioPodLogSynopsis, waitSecs int) *common.E2eFioPodLogSynopsis {
 	var podLogSynopsis *common.E2eFioPodLogSynopsis
 	if synopsisIn != nil {
 		podLogSynopsis = synopsisIn
@@ -383,14 +383,28 @@
 			logf.Log.Info("WARNING failed to compile regular expression for fio critical failure search")
 		}
 	})
+	podApi := gTestEnv.KubeInt.CoreV1().Pods
 	for _, container := range pod.Spec.Containers {
 		opts := v1.PodLogOptions{}
 		opts.Follow = true
 		opts.Container = container.Name
+		{
+			podCheck, podCheckErr := podApi(pod.Namespace).Get(context.TODO(), pod.Name, metaV1.GetOptions{})
+			for ix := 0; podCheckErr == nil && podCheck.Status.Phase == coreV1.PodPending && ix < waitSecs; ix++ {
+				time.Sleep(time.Second * 1)
+				podCheck, podCheckErr = podApi(pod.Namespace).Get(context.TODO(), pod.Name, metaV1.GetOptions{})
+			}
+			if podCheckErr != nil {
+				podLogSynopsis.Err = podCheckErr
+				logf.Log.Info("Failed to check that pod status != pending", "pod", pod.Name, "err", podCheckErr)
+				return podLogSynopsis
+			}
+			pod = *podCheck
+		}
 		podLogs, err := gTestEnv.KubeInt.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &opts).Stream(context.TODO())
 		if err != nil {
 			podLogSynopsis.Err = err
-			logf.Log.Info("Failed to stream logs for", "pod", pod, "err", err)
+			logf.Log.Info("Failed to stream logs for", "pod", pod.Name, "pod.Status", pod.Status.Phase, "err", err)
 			return podLogSynopsis
 		}
 		reader := bufio.NewScanner(podLogs)
@@ -403,15 +417,15 @@ func ScanFioPodLogs(pod v1.Pod, synopsisIn *common.E2eFioPodLogSynopsis, waitSecs int) *common
 			podLogSynopsis.CriticalFailure = true
 		}
 		if strings.HasPrefix(line, "JSON") {
-			jsondata := line[4:]
+			jsonData := line[4:]
 			fTSize := common.FioTargetSizeRecord{}
 			fExit := common.FioExitRecord{}
-			ju_err := json.Unmarshal([]byte(jsondata), &fTSize)
-			if ju_err == nil && fTSize.Size != nil {
+			juErr := json.Unmarshal([]byte(jsonData), &fTSize)
+			if juErr == nil && fTSize.Size != nil {
 				podLogSynopsis.JsonRecords.TargetSizes = append(podLogSynopsis.JsonRecords.TargetSizes, fTSize)
 			}
-			ju_err = json.Unmarshal([]byte(jsondata), &fExit)
-			if ju_err == nil && fExit.ExitValue != nil {
+			juErr = json.Unmarshal([]byte(jsonData), &fExit)
+			if juErr == nil && fExit.ExitValue != nil {
 				podLogSynopsis.JsonRecords.ExitValues = append(podLogSynopsis.JsonRecords.ExitValues, fExit)
 			}
 		}
@@ -428,7 +442,7 @@ func ScanFioPodLogsByName(podName string, nameSpace string) (*common.E2eFioPodLo
 	if err != nil {
 		return podLogSynopsis, err
 	}
-	return ScanFioPodLogs(*pod, nil), nil
+	return ScanFioPodLogs(*pod, nil, 60), nil
 }
 
 // MonitorE2EFioPod launches a go thread to stream fio pod log output and scan that stream
@@ -441,7 +455,7 @@ func MonitorE2EFioPod(podName string, nameSpace string) (*common.E2eFioPodOutput
 		return nil, err
 	}
 	go func(synopsis *common.E2eFioPodLogSynopsis, pod v1.Pod) {
-		ScanFioPodLogs(pod, synopsis)
+		ScanFioPodLogs(pod, synopsis, 10000)
 		podOut.Completed = true
 	}(&podOut.Synopsis, *pod)
 	podOut.PodName = podName
@@ -461,7 +475,7 @@ func CheckFioPodCompleted(podName string, nameSpace string) (coreV1.PodPhase, *c
 		if !containerStatus.Ready {
 			if containerStatus.State.Terminated != nil &&
 				containerStatus.State.Terminated.Reason == "Completed" {
-				podLogSynopsis = ScanFioPodLogs(*pod, nil)
+				podLogSynopsis = ScanFioPodLogs(*pod, nil, 60)
 				if containerStatus.State.Terminated.ExitCode == 0 {
 					return coreV1.PodSucceeded, podLogSynopsis, podLogSynopsis.Err
 				} else {
@@ -473,7 +487,7 @@ func CheckFioPodCompleted(podName string, nameSpace string) (coreV1.PodPhase, *c
 	}
 	if podLogSynopsis == nil || podLogSynopsis.Err != nil {
 		if pod.Status.Phase != coreV1.PodRunning && pod.Status.Phase != coreV1.PodPending {
-			podLogSynopsis = ScanFioPodLogs(*pod, nil)
+			podLogSynopsis = ScanFioPodLogs(*pod, nil, 60)
 		} else {
 			podLogSynopsis = &common.E2eFioPodLogSynopsis{}
 		}
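The new waitSecs parameter bounds how long ScanFioPodLogs waits for a pod to leave Pending before trying to stream its logs: MonitorE2EFioPod passes a generous 10000 seconds because it scans asynchronously, while the synchronous call sites use 60. A sketch of a caller, with the import path and helper name as illustrative assumptions; it uses only the fields visible in this diff (Completed, Synopsis.Err, Synopsis.CriticalFailure).

    // Hypothetical caller, for illustration only.
    package example

    import (
        "fmt"
        "time"

        "mayastor-e2e/common/k8stest" // assumed import path
    )

    // waitForFioPod starts log monitoring for a fio pod, blocks until the
    // scanning goroutine finishes, then inspects the log synopsis.
    func waitForFioPod(podName string, nameSpace string) error {
        podOut, err := k8stest.MonitorE2EFioPod(podName, nameSpace)
        if err != nil {
            return err
        }
        // MonitorE2EFioPod sets Completed once ScanFioPodLogs returns; polling
        // the flag here mirrors how the library exposes completion.
        for !podOut.Completed {
            time.Sleep(time.Second)
        }
        if podOut.Synopsis.Err != nil {
            return podOut.Synopsis.Err
        }
        if podOut.Synopsis.CriticalFailure {
            return fmt.Errorf("fio pod %s logged a critical failure", podName)
        }
        return nil
    }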