chore: update/fixes to support large numbers of volumes and pods #115

Open · wants to merge 1 commit into base: develop
81 changes: 81 additions & 0 deletions common/k8stest/test.go
@@ -174,6 +174,7 @@ func ResourceCheck(waitForPools bool) error {
} else {
if msvs != nil {
if len(msvs) != 0 {
logf.Log.Info("ResourceCheck: found", "msvs", msvs)
errs.Accumulate(fmt.Errorf("found MayastorVolumes"))
}
} else {
@@ -311,3 +312,83 @@ func ResourceK8sCheck() error {

return errs.GetError()
}

// VerifyMayastorResourceUsageIsZero returns true if mayastor resource usage is 0.
// If resource values cannot be established, it returns false.
func VerifyMayastorResourceUsageIsZero() bool {
var errs = common.ErrorAccumulator{}
var mspUsage, poolUsage uint64
allPoolsOnline := false
msvCount := -1
nexusCount := -1
nvmeControllerCount := -1
replicaCount := -1

msvs, err := ListMsvs()
if err != nil {
errs.Accumulate(err)
} else {
if msvs != nil {
msvCount = len(msvs)
} else {
logf.Log.Info("Listing MSVs returned nil array")
}
}

err = custom_resources.CheckAllMsPoolsAreOnline()
if err != nil {
errs.Accumulate(err)
} else {
allPoolsOnline = true
}

mspUsage, err = getMspUsage()
if err != nil {
errs.Accumulate(err)
}

// gRPC calls can only be executed successfully if the e2e-agent daemonSet has been deployed successfully.
if mayastorclient.CanConnect() {
// check pools
{
poolUsage, err = GetPoolUsageInCluster()
if err != nil {
errs.Accumulate(err)
}
}
// check nexuses
{
nexuses, err := ListNexusesInCluster()
if err != nil {
errs.Accumulate(err)
} else {
nexusCount = len(nexuses)
}
}
// check replicas
{
replicas, err := ListReplicasInCluster()
if err != nil {
errs.Accumulate(err)
} else {
replicaCount = len(replicas)
}
}
// check nvmeControllers
{
nvmeControllers, err := ListNvmeControllersInCluster()
if err != nil {
errs.Accumulate(err)
} else {
nvmeControllerCount = len(nvmeControllers)
}
}
} else {
errs.Accumulate(fmt.Errorf("gRPC calls to mayastor are not enabled, all checks cannot be run"))
}
if errs.GetError() != nil {
logf.Log.Info("VerifyMayastorResourceUsageIsZero: ", "errors", errs.GetError())
}
logf.Log.Info("VerifyMayastorResourceUsageIsZero: ", "allPoolsOnline", allPoolsOnline, "mspUsage", mspUsage, "poolUsage", poolUsage, "msvCount", msvCount, "nexusCount", nexusCount, "nvmeControllerCount", nvmeControllerCount, "replicaCount", replicaCount)
return allPoolsOnline && mspUsage == 0 && poolUsage == 0 && msvCount == 0 && nexusCount == 0 && nvmeControllerCount == 0 && replicaCount == 0 && errs.GetError() == nil
}
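
Because volume and pod teardown is asynchronous, a caller would typically poll this predicate rather than evaluate it once. A minimal sketch of such a retry loop inside package k8stest; the helper name, retry count and sleep interval are illustrative assumptions, not part of this change:

// waitForZeroResourceUsage polls VerifyMayastorResourceUsageIsZero until it
// reports zero usage or the retry budget is exhausted (hypothetical helper;
// assumes "time" is imported).
func waitForZeroResourceUsage(retries int, sleep time.Duration) bool {
	for ix := 0; ix < retries; ix++ {
		if VerifyMayastorResourceUsageIsZero() {
			return true
		}
		time.Sleep(sleep)
	}
	return false
}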
9 changes: 7 additions & 2 deletions common/k8stest/util_pvc.go
@@ -540,12 +540,17 @@ func CheckForPVs() (bool, error) {
return foundResources, err
}

// CreatePvc creates a PVC, reporting the outcome through errBuf and uuid; intended to be launched as a goroutine with a shared WaitGroup.
func CreatePvc(createOpts *coreV1.PersistentVolumeClaim, errBuf *error, uuid *string, wg *sync.WaitGroup) {
// Create the PVC.
pvc, err := gTestEnv.KubeInt.CoreV1().PersistentVolumeClaims(createOpts.ObjectMeta.Namespace).Create(context.TODO(), createOpts, metaV1.CreateOptions{})
*errBuf = err
if pvc != nil {
*uuid = string(pvc.UID)
if err == nil {
if pvc != nil {
*uuid = string(pvc.UID)
} else {
*errBuf = fmt.Errorf("pvc create return nil pvc")
}
}
wg.Done()
}
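
The errBuf/uuid/WaitGroup signature exists so that large batches of PVCs can be created concurrently, which is what this PR targets. A minimal sketch of a caller, assuming a prepared pvcSpecs slice of *coreV1.PersistentVolumeClaim (the slice and result names are illustrative):

// Create all PVCs in parallel; each goroutine writes to its own slot of
// errs/uuids, so no extra locking is needed once wg.Wait() returns.
var wg sync.WaitGroup
errs := make([]error, len(pvcSpecs))
uuids := make([]string, len(pvcSpecs))
for ix := range pvcSpecs {
	wg.Add(1)
	go CreatePvc(pvcSpecs[ix], &errs[ix], &uuids[ix], &wg)
}
wg.Wait()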
36 changes: 25 additions & 11 deletions common/k8stest/util_testpods.go
@@ -358,7 +358,7 @@ var reCompileOnce sync.Once
var reFioLog *regexp.Regexp = nil
var reFioCritical *regexp.Regexp = nil

func ScanFioPodLogs(pod v1.Pod, synopsisIn *common.E2eFioPodLogSynopsis) *common.E2eFioPodLogSynopsis {
func ScanFioPodLogs(pod v1.Pod, synopsisIn *common.E2eFioPodLogSynopsis, waitSecs int) *common.E2eFioPodLogSynopsis {
var podLogSynopsis *common.E2eFioPodLogSynopsis
if synopsisIn != nil {
podLogSynopsis = synopsisIn
@@ -383,14 +383,28 @@ func ScanFioPodLogs(pod v1.Pod, synopsisIn *common.E2eFioPodLogSynopsis) *common
logf.Log.Info("WARNING failed to compile regular expression for fio critical failure search")
}
})
podApi := gTestEnv.KubeInt.CoreV1().Pods
for _, container := range pod.Spec.Containers {
opts := v1.PodLogOptions{}
opts.Follow = true
opts.Container = container.Name
{
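// Wait (for up to waitSecs seconds) for the pod to leave the Pending phase:
// logs cannot be streamed from a container that has not started yet.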
podCheck, podCheckErr := podApi(pod.Namespace).Get(context.TODO(), pod.Name, metaV1.GetOptions{})
for ix := 0; podCheckErr == nil && podCheck.Status.Phase == coreV1.PodPending && ix < waitSecs; ix++ {
time.Sleep(time.Second * 1)
podCheck, podCheckErr = podApi(pod.Namespace).Get(context.TODO(), pod.Name, metaV1.GetOptions{})
}
if podCheckErr != nil {
podLogSynopsis.Err = podCheckErr
logf.Log.Info("Failed check pod status != pending", "pod", pod.Name, "err", podCheckErr)
return podLogSynopsis
}
pod = *podCheck
}
podLogs, err := gTestEnv.KubeInt.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &opts).Stream(context.TODO())
if err != nil {
podLogSynopsis.Err = err
logf.Log.Info("Failed to stream logs for", "pod", pod, "err", err)
logf.Log.Info("Failed to stream logs for", "pod", pod.Name, "pod.Status", pod.Status.Phase, "err", err)
return podLogSynopsis
}
reader := bufio.NewScanner(podLogs)
@@ -403,15 +417,15 @@ func ScanFioPodLogs(pod v1.Pod, synopsisIn *common.E2eFioPodLogSynopsis) *common
podLogSynopsis.CriticalFailure = true
}
if strings.HasPrefix(line, "JSON") {
jsondata := line[4:]
jsonData := line[4:]
fTSize := common.FioTargetSizeRecord{}
fExit := common.FioExitRecord{}
ju_err := json.Unmarshal([]byte(jsondata), &fTSize)
if ju_err == nil && fTSize.Size != nil {
juErr := json.Unmarshal([]byte(jsonData), &fTSize)
if juErr == nil && fTSize.Size != nil {
podLogSynopsis.JsonRecords.TargetSizes = append(podLogSynopsis.JsonRecords.TargetSizes, fTSize)
}
ju_err = json.Unmarshal([]byte(jsondata), &fExit)
if ju_err == nil && fExit.ExitValue != nil {
juErr = json.Unmarshal([]byte(jsonData), &fExit)
if juErr == nil && fExit.ExitValue != nil {
podLogSynopsis.JsonRecords.ExitValues = append(podLogSynopsis.JsonRecords.ExitValues, fExit)
}
}
@@ -428,7 +442,7 @@ func ScanFioPodLogsByName(podName string, nameSpace string) (*common.E2eFioPodLo
if err != nil {
return podLogSynopsis, err
}
return ScanFioPodLogs(*pod, nil), nil
return ScanFioPodLogs(*pod, nil, 60), nil
}

// MonitorE2EFioPod launches a go thread to stream fio pod log output and scan that stream
@@ -441,7 +455,7 @@ func MonitorE2EFioPod(podName string, nameSpace string) (*common.E2eFioPodOutput
return nil, err
}
go func(synopsis *common.E2eFioPodLogSynopsis, pod v1.Pod) {
ScanFioPodLogs(pod, synopsis)
ScanFioPodLogs(pod, synopsis, 10000)
podOut.Completed = true
}(&podOut.Synopsis, *pod)
podOut.PodName = podName
@@ -461,7 +475,7 @@ func CheckFioPodCompleted(podName string, nameSpace string) (coreV1.PodPhase, *c
if !containerStatus.Ready {
if containerStatus.State.Terminated != nil &&
containerStatus.State.Terminated.Reason == "Completed" {
podLogSynopsis = ScanFioPodLogs(*pod, nil)
podLogSynopsis = ScanFioPodLogs(*pod, nil, 60)
if containerStatus.State.Terminated.ExitCode == 0 {
return coreV1.PodSucceeded, podLogSynopsis, podLogSynopsis.Err
} else {
@@ -473,7 +487,7 @@ }
}
if podLogSynopsis == nil || podLogSynopsis.Err != nil {
if pod.Status.Phase != coreV1.PodRunning && pod.Status.Phase != coreV1.PodPending {
podLogSynopsis = ScanFioPodLogs(*pod, nil)
podLogSynopsis = ScanFioPodLogs(*pod, nil, 60)
} else {
podLogSynopsis = &common.E2eFioPodLogSynopsis{}
}
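
Taken together, the new waitSecs argument lets log scanning tolerate pods that are still Pending, which matters when hundreds of fio pods are scheduled at once. A minimal sketch of how a test might poll CheckFioPodCompleted until a fio pod finishes; the helper name, one-second poll interval and timeout handling are illustrative assumptions, not part of this change:

// waitForFioPodCompletion polls CheckFioPodCompleted once per second until
// the fio pod reaches a terminal phase or timeoutSecs elapses (hypothetical
// helper; assumes "time" and "fmt" are imported).
func waitForFioPodCompletion(podName string, nameSpace string, timeoutSecs int) (coreV1.PodPhase, error) {
	for ix := 0; ix < timeoutSecs; ix++ {
		phase, synopsis, err := CheckFioPodCompleted(podName, nameSpace)
		if err != nil {
			return phase, err
		}
		if phase == coreV1.PodSucceeded || phase == coreV1.PodFailed {
			if synopsis != nil && synopsis.CriticalFailure {
				return phase, fmt.Errorf("fio pod %s reported a critical failure", podName)
			}
			return phase, nil
		}
		time.Sleep(time.Second)
	}
	return coreV1.PodUnknown, fmt.Errorf("timed out waiting for fio pod %s", podName)
}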