Remove fetching of container logs from initializer
Following the shift in focus to the exit code of the command used in the initializer job (Pod),
this commit removes the fetching of the container log.

Previously, only a basic JSON unmarshalling was applied to the log, with no interpretation of its
contents. This is either already covered by `k6 inspect` exiting with rc=0 or should be added to
the initializer job.

If further details about the failure reason need to be read from the initializer job, this should be done via the termination message [1].

[1] https://kubernetes.io/docs/tasks/debug/debug-application/determine-reason-pod-failure/#customizing-the-termination-message
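
For illustration, a minimal sketch (not part of this commit; the helper name `initializerFailure` is hypothetical) of how a controller could read both the exit code and the termination message from the initializer Pod's container status, per the Kubernetes docs in [1]:

package controllers

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
)

// initializerFailure returns a human-readable failure reason for the "k6"
// container of an initializer pod, or "" if that container has not
// terminated with a non-zero exit code.
func initializerFailure(pod *corev1.Pod) string {
    for _, cs := range pod.Status.ContainerStatuses {
        if cs.Name != "k6" {
            continue
        }
        term := cs.State.Terminated
        if term == nil || term.ExitCode == 0 {
            // Still running, or `k6 inspect` exited with rc=0.
            return ""
        }
        // term.Message carries whatever the container wrote to
        // /dev/termination-log (or its configured terminationMessagePath).
        return fmt.Sprintf("initializer exited with code %d: %s",
            term.ExitCode, term.Message)
    }
    return ""
}

Setting `terminationMessagePolicy: FallbackToLogsOnError` on the initializer container would additionally populate the message from the tail of the container log when nothing was written to the termination message file.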
frittentheke committed Aug 22, 2024
1 parent 2868535 commit d302e11
Showing 1 changed file with 1 addition and 54 deletions.
55 changes: 1 addition & 54 deletions controllers/common.go
@@ -1,24 +1,17 @@
 package controllers
 
 import (
-    "bytes"
     "context"
-    "encoding/json"
     "errors"
     "fmt"
-    "io"
-    "time"
 
     "github.com/go-logr/logr"
     "github.com/grafana/k6-operator/api/v1alpha1"
     "github.com/grafana/k6-operator/pkg/cloud"
     "github.com/grafana/k6-operator/pkg/testrun"
     corev1 "k8s.io/api/core/v1"
-    v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/types"
-    "k8s.io/client-go/kubernetes"
-    "k8s.io/client-go/rest"
     "sigs.k8s.io/controller-runtime/pkg/client"
 )

@@ -65,52 +58,6 @@ func inspectTestRun(ctx context.Context, log logr.Logger, k6 v1alpha1.TestRunI,
         return
     }
 
-    // Here we need to get the output of the pod
-    // pods/log is not currently supported by controller-runtime client and it is officially
-    // recommended to use REST client instead:
-    // https://github.com/kubernetes-sigs/controller-runtime/issues/1229
-
-    // TODO: if the below errors repeat several times, it'd be a real error case scenario.
-    // How likely is it? Should we track frequency of these errors here?
-    config, err := rest.InClusterConfig()
-    if err != nil {
-        log.Error(err, "unable to fetch in-cluster REST config")
-        returnErr = err
-        return
-    }
-
-    clientset, err := kubernetes.NewForConfig(config)
-    if err != nil {
-        log.Error(err, "unable to get access to clientset")
-        returnErr = err
-        return
-    }
-    req := clientset.CoreV1().Pods(k6.NamespacedName().Namespace).GetLogs(podList.Items[0].Name, &corev1.PodLogOptions{
-        Container: "k6",
-    })
-    ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
-    defer cancel()
-
-    podLogs, err := req.Stream(ctx)
-    if err != nil {
-        log.Error(err, "unable to stream logs from the pod")
-        returnErr = err
-        return
-    }
-    defer podLogs.Close()
-
-    buf := new(bytes.Buffer)
-    _, returnErr = io.Copy(buf, podLogs)
-    if err != nil {
-        log.Error(err, "unable to copy logs from the pod")
-        return
-    }
-
-    if returnErr = json.Unmarshal(buf.Bytes(), &inspectOutput); returnErr != nil {
-        // this shouldn't normally happen but if it does, let's log output by default
-        log.Error(returnErr, fmt.Sprintf("unable to marshal: `%s`", buf.String()))
-    }
-
     ready = true
     return
 }
@@ -201,7 +148,7 @@ func (r *TestRunReconciler) hostnames(ctx context.Context, log logr.Logger, abor
         err error
     )
 
-    sl := &v1.ServiceList{}
+    sl := &corev1.ServiceList{}
 
     if err = r.List(ctx, sl, opts); err != nil {
         log.Error(err, "Could not list services")
