Merge branch 'kubearmor:main' into k8s_testing
Ishaanj18 authored May 31, 2024
2 parents a2e5472 + bbdc04d commit 135df34
Showing 14 changed files with 203 additions and 132 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci-latest-helm-chart-release.yaml
@@ -13,7 +13,7 @@ permissions: read-all
jobs:
publish-chart:
name: Update Stable Helm Chart With Latest Changes
-    if: ${{ (github.repository == 'kubearmor/kubearmor') && (!contains(github.event.head_commit.message, '[skip ci]')) }}
+    if: ${{ (github.repository == 'kubearmor/kubearmor') }}
runs-on: ubuntu-20.04
permissions:
contents: write
14 changes: 7 additions & 7 deletions .github/workflows/ci-latest-release.yml
@@ -46,7 +46,7 @@ jobs:
if: github.repository == 'kubearmor/kubearmor' && (needs.check.outputs.kubearmor == 'true' || ${{ github.ref }} != 'refs/heads/main')
runs-on: ubuntu-latest-16-cores
permissions:
id-token: write
timeout-minutes: 120
steps:
- uses: actions/checkout@v3
@@ -81,7 +81,7 @@ jobs:
run: |
make docker-build TAG=${{ steps.vars.outputs.tag }}
- name: deploy pre existing pod
run: |
kubectl apply -f ./tests/k8s_env/ksp/pre-run-pod.yaml
sleep 60
@@ -93,7 +93,7 @@
docker save kubearmor/kubearmor:${{ steps.vars.outputs.tag }} | sudo k3s ctr images import -
docker save kubearmor/kubearmor-operator:${{ steps.vars.outputs.tag }} | sudo k3s ctr images import -
docker save kubearmor/kubearmor-snitch:${{ steps.vars.outputs.tag }} | sudo k3s ctr images import -
helm upgrade --install kubearmor-operator ./deployments/helm/KubeArmorOperator -n kubearmor --create-namespace --set kubearmorOperator.image.tag=${{ steps.vars.outputs.tag }}
kubectl wait --for=condition=ready --timeout=5m -n kubearmor pod -l kubearmor-app=kubearmor-operator
kubectl get pods -A
@@ -145,12 +145,12 @@ jobs:
- name: Push KubeArmor images to Docker
run: GITHUB_SHA=$GITHUB_SHA ./KubeArmor/build/push_kubearmor.sh ${{ steps.vars.outputs.tag }}

- name: Install Cosign
uses: sigstore/cosign-installer@main

- name: Get Image Digest
id: digest
run: |
echo "imagedigest=$(jq -r '.["containerimage.digest"]' kubearmor.json)" >> $GITHUB_OUTPUT
echo "initdigest=$(jq -r '.["containerimage.digest"]' kubearmor-init.json)" >> $GITHUB_OUTPUT
echo "ubidigest=$(jq -r '.["containerimage.digest"]' kubearmor-ubi.json)" >> $GITHUB_OUTPUT
@@ -207,7 +207,7 @@ jobs:
regctl image copy kubearmor/kubearmor:$STABLE_VERSION kubearmor/kubearmor:stable --digest-tags
regctl image copy kubearmor/kubearmor-ubi:$STABLE_VERSION kubearmor/kubearmor-ubi:stable --digest-tags
regctl image copy kubearmor/kubearmor-controller:$STABLE_VERSION kubearmor/kubearmor-controller:stable --digest-tags
kubearmor-controller-release:
name: Build & Push KubeArmorController
needs: check
@@ -223,7 +223,7 @@ jobs:
- uses: actions/setup-go@v5
with:
go-version-file: 'KubeArmor/go.mod'

- name: Set up QEMU
uses: docker/setup-qemu-action@v2

5 changes: 4 additions & 1 deletion .github/workflows/ci-marketplace-release.yml
@@ -7,6 +7,9 @@ on:
- "STABLE-RELEASE"
- ".github/workflows/ci-marketplace-release.yml"

# Declare default permissions as read only.
permissions: read-all

jobs:
certify-images-on-redhat:
runs-on: ubuntu-latest
@@ -249,4 +252,4 @@ jobs:
Assignees: @kubearmor/triagers
Refer the documentation [here](https://github.com/kubearmor/KubeArmor/wiki/Update-KubeArmor-Marketplace-Releases) for update listing instructions.
55 changes: 50 additions & 5 deletions .github/workflows/ci-systemd-release.yml
@@ -1,10 +1,19 @@
name: ci-systemd-release

on:
workflow_dispatch:
inputs:
tag:
description: "Release tag which has to be updated"
type: "string"
required: true
push:
tags:
- "*"

# Declare default permissions as read only.
permissions: read-all

jobs:
goreleaser:
runs-on: ubuntu-20.04
@@ -16,34 +25,70 @@ jobs:
- uses: actions/checkout@v3
with:
submodules: true
fetch-depth: 0

- uses: actions/setup-go@v5
with:
go-version-file: 'KubeArmor/go.mod'


- name: Install the latest LLVM toolchain
run: ./.github/workflows/install-llvm.sh

- name: Compile libbpf
run: ./.github/workflows/install-libbpf.sh

- name: Install Cosign
uses: sigstore/cosign-installer@main

- name: Install karmor
run: curl -sfL https://raw.githubusercontent.com/kubearmor/kubearmor-client/main/install.sh | sudo sh -s -- -b .
working-directory: KubeArmor

- name: Build KubeArmor object files
run: make
working-directory: KubeArmor/BPF


- name: Log in to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_AUTHTOK }}

- name: Get release tag
id: vars
run: |
cp KubeArmor/.goreleaser.yaml /tmp/.goreleaser.yaml
if [[ ${{ github.event_name }} == "workflow_dispatch" ]]; then
# checkout branch but use goreleaser config from latest
echo "Checking out tag: ${{ inputs.tag }}"
git checkout ${{ inputs.tag }}
echo "GORELEASER_CURRENT_TAG=${{ inputs.tag }}" >> $GITHUB_OUTPUT
REF=${{ inputs.tag }}
echo "tag=${REF#v}" >> $GITHUB_OUTPUT
else
REF=${GITHUB_REF#refs/*/}
echo "tag=${REF#v}" >> $GITHUB_OUTPUT
fi
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v5
with:
distribution: goreleaser
version: v1.25.0
-          args: release --clean
+          args: release --config=/tmp/.goreleaser.yaml
workdir: KubeArmor
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GORELEASER_CURRENT_TAG: ${{ steps.vars.outputs.GORELEASER_CURRENT_TAG }}

- name: Setup ORAS
uses: oras-project/setup-oras@v1
with:
version: 1.0.0

- name: Publish release artifacts to Dockerhub
working-directory: KubeArmor/dist
run: |
oras push docker.io/kubearmor/kubearmor-systemd:${{ steps.vars.outputs.tag }}_linux-amd64 kubearmor_${{ steps.vars.outputs.tag }}_linux-amd64.tar.gz
oras push docker.io/kubearmor/kubearmor-systemd:${{ steps.vars.outputs.tag }}_linux-arm64 kubearmor_${{ steps.vars.outputs.tag }}_linux-arm64.tar.gz
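Taken together, the tag step and the ORAS push fix the artifact coordinates. As a worked example (assuming a hypothetical pushed tag v1.3.8): GITHUB_REF is refs/tags/v1.3.8, so ${GITHUB_REF#refs/*/} yields v1.3.8 and ${REF#v} strips the leading v to give 1.3.8, and the tarballs are published as docker.io/kubearmor/kubearmor-systemd:1.3.8_linux-amd64 and :1.3.8_linux-arm64. A workflow_dispatch run with tag: v1.3.8 resolves to the same coordinates, while using the goreleaser config saved to /tmp before the tag checkout.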
9 changes: 8 additions & 1 deletion KubeArmor/.goreleaser.yaml
@@ -8,6 +8,13 @@ builds:
goarch:
- amd64
- arm64
env:
- CGO_ENABLED=0

release:
replace_existing_artifacts: true
mode: replace
make_latest: false

signs:
- cmd: cosign
@@ -20,7 +27,7 @@ signs:
- --yes
artifacts: all
output: true

archives:
- id: "kubearmor"
builds:
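Two effects of this config are worth noting. CGO_ENABLED=0 makes Go emit statically linked binaries, so the packaged systemd artifacts do not depend on the build host's libc. And the release block (replace_existing_artifacts: true with mode: replace) should let a re-run — e.g. the workflow_dispatch path above — overwrite the assets of an existing GitHub release rather than fail, while make_latest: false keeps a re-published older tag from being marked as the repository's latest release.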
18 changes: 18 additions & 0 deletions KubeArmor/core/k8sHandler.go
@@ -591,6 +591,24 @@ func getTopLevelOwner(obj metav1.ObjectMeta, namespace string, objkind string) (
if len(pod.OwnerReferences) > 0 {
return getTopLevelOwner(pod.ObjectMeta, namespace, "Pod")
}
case "Job":
job, err := K8s.K8sClient.BatchV1().Jobs(namespace).Get(context.Background(), ownerRef.Name, metav1.GetOptions{})
if err != nil {
return "", "", "", err
}
if len(job.OwnerReferences) > 0 {
return getTopLevelOwner(job.ObjectMeta, namespace, "CronJob")
}
return job.Name, "Job", job.Namespace, nil
case "CronJob":
cronJob, err := K8s.K8sClient.BatchV1().CronJobs(namespace).Get(context.Background(), ownerRef.Name, metav1.GetOptions{})
if err != nil {
return "", "", "", err
}
if len(cronJob.OwnerReferences) > 0 {
return getTopLevelOwner(cronJob.ObjectMeta, namespace, "CronJob")
}
return cronJob.Name, "CronJob", cronJob.Namespace, nil
case "Deployment":
deployment, err := K8s.K8sClient.AppsV1().Deployments(namespace).Get(context.Background(), ownerRef.Name, metav1.GetOptions{})
if err != nil {
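The two new cases slot Jobs and CronJobs into the existing recursive owner walk: a Pod created by a Job resolves to that Job, unless the Job itself carries an owner reference (its CronJob), in which case the walk continues one level up. A minimal sketch of the same pattern as a standalone helper — resolveOwner is hypothetical, and the usual client-go imports (context, metav1, kubernetes) are assumed:

// resolveOwner mirrors getTopLevelOwner's Job/CronJob handling: follow the
// first ownerReference upward until an object without owners is reached.
func resolveOwner(ctx context.Context, cs kubernetes.Interface, ns, kind, name string) (string, string, string, error) {
    switch kind {
    case "Job":
        job, err := cs.BatchV1().Jobs(ns).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            return "", "", "", err
        }
        if len(job.OwnerReferences) > 0 {
            // a Job spawned by a CronJob lists that CronJob as its owner
            return resolveOwner(ctx, cs, ns, "CronJob", job.OwnerReferences[0].Name)
        }
        return job.Name, "Job", job.Namespace, nil
    case "CronJob":
        cronJob, err := cs.BatchV1().CronJobs(ns).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            return "", "", "", err
        }
        return cronJob.Name, "CronJob", cronJob.Namespace, nil
    default:
        // Deployments, ReplicaSets, etc. are covered by the existing cases
        return name, kind, ns, nil
    }
}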
55 changes: 41 additions & 14 deletions KubeArmor/core/kubeUpdate.go
@@ -550,9 +550,6 @@ func (dm *KubeArmorDaemon) HandleUnknownNamespaceNsMap(container *tp.Container)

// WatchK8sPods Function
func (dm *KubeArmorDaemon) WatchK8sPods() {
-  var controllerName, controller, namespace string
-  var err error

nodeName := os.Getenv("KUBEARMOR_NODENAME")
if nodeName == "" {
nodeName = cfg.GlobalCfg.Host
@@ -591,29 +588,43 @@
pod.Metadata["namespaceName"] = event.Object.ObjectMeta.Namespace
pod.Metadata["podName"] = event.Object.ObjectMeta.Name

+  var controllerName, controller, namespace string
+  var err error

if event.Type == "ADDED" {
controllerName, controller, namespace, err = getTopLevelOwner(event.Object.ObjectMeta, event.Object.Namespace, event.Object.Kind)
if err != nil {
dm.Logger.Warnf("Failed to get ownerRef (%s, %s)", event.Object.ObjectMeta.Name, err.Error())
}

owner := tp.PodOwner{
Name: controllerName,
Ref: controller,
Namespace: namespace,
}

dm.OwnerInfo[pod.Metadata["podName"]] = owner
podOwnerName = controllerName
}
-  _, err := K8s.K8sClient.CoreV1().Pods(namespace).Get(context.Background(), event.Object.ObjectMeta.Name, metav1.GetOptions{})
-  if err == nil && (event.Type == "MODIFIED" || event.Type != "DELETED") {

+  // For event == "MODIFIED", we first check that the pod still exists before updating its entry in dm.OwnerInfo, because a pod in the terminating state no longer yields owner info.
+  // Owner info is not updated while a pod is terminating; once the pod is deleted, its entry is removed from the map.
+  _, err = K8s.K8sClient.CoreV1().Pods(namespace).Get(context.Background(), event.Object.ObjectMeta.Name, metav1.GetOptions{})
+  if err == nil && event.Type == "MODIFIED" {
controllerName, controller, namespace, err = getTopLevelOwner(event.Object.ObjectMeta, event.Object.Namespace, event.Object.Kind)
if err != nil {
dm.Logger.Warnf("Failed to get ownerRef (%s, %s)", event.Object.ObjectMeta.Name, err.Error())
}
}

owner := tp.PodOwner{
Name: controllerName,
Ref: controller,
Namespace: namespace,
}

dm.OwnerInfo[pod.Metadata["podName"]] = owner
owner := tp.PodOwner{
Name: controllerName,
Ref: controller,
Namespace: namespace,
}

podOwnerName = controllerName
dm.OwnerInfo[pod.Metadata["podName"]] = owner
podOwnerName = controllerName
}

// get the owner, then check whether that owner itself has an owner; recurse until an object with no owner is reached

@@ -763,6 +774,22 @@ func (dm *KubeArmorDaemon) WatchK8sPods() {
}
}

} else if dm.OwnerInfo[pod.Metadata["podName"]].Ref == "Job" {
job, err := K8s.K8sClient.BatchV1().Jobs(pod.Metadata["namespaceName"]).Get(context.Background(), podOwnerName, metav1.GetOptions{})
if err == nil {
for _, c := range job.Spec.Template.Spec.Containers {
containers = append(containers, c.Name)
}
}

} else if dm.OwnerInfo[pod.Metadata["podName"]].Ref == "CronJob" {
cronJob, err := K8s.K8sClient.BatchV1().CronJobs(pod.Metadata["namespaceName"]).Get(context.Background(), podOwnerName, metav1.GetOptions{})
if err == nil {
for _, c := range cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers {
containers = append(containers, c.Name)
}
}

}

}
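The matching branches above read container names from the owner's pod template: spec.template.spec.containers for a Job, and spec.jobTemplate.spec.template.spec.containers for a CronJob (one level deeper, since a CronJob wraps a Job template). A sketch of that lookup as a standalone helper — containersFromOwner is hypothetical, with corev1 = k8s.io/api/core/v1 and the other client-go imports assumed:

// containersFromOwner extracts container names from the pod template of the
// owning Job or CronJob, as the new branches do when populating containers.
func containersFromOwner(ctx context.Context, cs kubernetes.Interface, ns, ref, name string) []string {
    var spec corev1.PodSpec
    switch ref {
    case "Job":
        job, err := cs.BatchV1().Jobs(ns).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            return nil
        }
        spec = job.Spec.Template.Spec
    case "CronJob":
        cronJob, err := cs.BatchV1().CronJobs(ns).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            return nil
        }
        spec = cronJob.Spec.JobTemplate.Spec.Template.Spec
    }
    names := make([]string, 0, len(spec.Containers))
    for _, c := range spec.Containers {
        names = append(names, c.Name)
    }
    return names
}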
2 changes: 1 addition & 1 deletion KubeArmor/go.mod
@@ -45,6 +45,7 @@ require (
k8s.io/apimachinery v0.29.0
k8s.io/client-go v0.29.0
k8s.io/cri-api v0.29.0
+  k8s.io/klog/v2 v2.120.0
k8s.io/utils v0.0.0-20240310230437-4693a0247e57
sigs.k8s.io/controller-runtime v0.15.3
)
@@ -130,7 +131,6 @@ require (
gotest.tools/v3 v3.4.0 // indirect
k8s.io/apiextensions-apiserver v0.29.0 // indirect
k8s.io/component-base v0.29.0 // indirect
-  k8s.io/klog/v2 v2.120.0 // indirect
k8s.io/kube-openapi v0.0.0-20240105020646-a37d4de58910 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
5 changes: 5 additions & 0 deletions deployments/get/objects.go
@@ -51,6 +51,11 @@ func GetClusterRole() *rbacv1.ClusterRole {
Resources: []string{"deployments", "replicasets", "daemonsets", "statefulsets"},
Verbs: []string{"get", "patch", "list", "watch", "update"},
},
{
APIGroups: []string{"batch"},
Resources: []string{"jobs", "cronjobs"},
Verbs: []string{"get"},
},
{
APIGroups: []string{"security.kubearmor.com"},
Resources: []string{"kubearmorpolicies", "kubearmorhostpolicies"},
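This rule is what authorizes the new getTopLevelOwner lookups: without get on jobs and cronjobs in the batch API group, the Jobs(...).Get and CronJobs(...).Get calls above would be rejected with a Forbidden error. The same grant is mirrored in the Helm chart and operator ClusterRoles below, and can be spot-checked against a running cluster with, e.g., kubectl auth can-i get jobs.batch --as=system:serviceaccount:kubearmor:kubearmor (service-account name assumed for illustration).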
7 changes: 7 additions & 0 deletions deployments/helm/KubeArmor/templates/RBAC/roles.yaml
@@ -29,6 +29,13 @@ rules:
- list
- watch
- update
- apiGroups:
- batch
resources:
- jobs
- cronjobs
verbs:
- get
- apiGroups:
- security.kubearmor.com
resources:
@@ -122,6 +122,13 @@ rules:
- list
- watch
- update
- apiGroups:
- batch
resources:
- jobs
- cronjobs
verbs:
- get
- apiGroups:
- security.kubearmor.com
resources:
7 changes: 7 additions & 0 deletions pkg/KubeArmorOperator/config/rbac/clusterrole.yaml
@@ -123,6 +123,13 @@ rules:
- list
- watch
- update
- apiGroups:
- batch
resources:
- jobs
- cronjobs
verbs:
- get
- apiGroups:
- security.kubearmor.com
resources:
2 changes: 1 addition & 1 deletion pkg/KubeArmorOperator/internal/controller/cluster.go
@@ -701,7 +701,7 @@ func (clusterWatcher *ClusterWatcher) DeleteAllTlsSecrets() error {
for _, secret := range tlsSecrets {
err := clusterWatcher.Client.CoreV1().Secrets(common.Namespace).Delete(context.Background(), secret, v1.DeleteOptions{})
if err != nil {
-        clusterWatcher.Log.Errorf("error while deleing secret: %s", secret)
+        clusterWatcher.Log.Errorf("error while deleting secret: %s, error=%s", secret, err.Error())
return err
}
}