chore(controller): deprecate kube-rbac-proxy with controller built-in auth protection #4221
Workflow file for this run: .github/workflows/ci-test-ginkgo.yml
name: ci-test-ginkgo
on:
  push:
    branches: [main]
    paths:
      - "KubeArmor/**"
      - "tests/**"
      - "protobuf/**"
      - ".github/workflows/ci-test-ginkgo.yml"
      - "pkg/KubeArmorOperator/**"
      - "deployments/helm/**"
  pull_request:
    branches: [main]
    paths:
      - "KubeArmor/**"
      - "tests/**"
      - "protobuf/**"
      - ".github/workflows/ci-test-ginkgo.yml"
      - "examples/multiubuntu/build/**"
      - "pkg/KubeArmorOperator/**"
      - "deployments/helm/**"
# Declare default permissions as read only.
permissions: read-all
jobs:
  build:
    name: Auto-testing Framework / ${{ matrix.os }} / ${{ matrix.runtime }}
    runs-on: ${{ matrix.os }}
    env:
      RUNTIME: ${{ matrix.runtime }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-20.04]
        runtime: ["containerd", "crio"]
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: true
      - uses: actions/setup-go@v5
        with:
          go-version-file: 'KubeArmor/go.mod'
      - name: Check what paths were updated
        uses: dorny/paths-filter@v2
        id: filter
        with:
          filters: |
            controller:
              - 'pkg/KubeArmorController/**'
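      # The LLVM toolchain and libbpf are needed to compile KubeArmor's eBPF objects,
      # and install-k3s.sh stands up the single-node cluster used by the test suite.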
      - name: Install the latest LLVM toolchain
        run: ./.github/workflows/install-llvm.sh
      - name: Compile libbpf
        run: ./.github/workflows/install-libbpf.sh
      - name: Setup a Kubernetes environment
        run: ./.github/workflows/install-k3s.sh
      - name: Generate KubeArmor artifacts
        run: |
          # set the $IS_COVERAGE env var to 'true' to build the kubearmor-test image for coverage calculation
          export IS_COVERAGE=true
          GITHUB_SHA=$GITHUB_SHA ./KubeArmor/build/build_kubearmor.sh
      - name: Build KubeArmor Operator
        working-directory: pkg/KubeArmorOperator
        run: |
          make docker-build
      - name: Build KubeArmorController
        if: steps.filter.outputs.controller == 'true'
        run: make -C pkg/KubeArmorController/ docker-build TAG=latest
      - name: Deploy pre-existing pod
        run: |
          kubectl apply -f ./tests/k8s_env/ksp/pre-run-pod.yaml
          sleep 60
          kubectl get pods -A
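      # Locally built images are imported into the cluster's runtime: `k3s ctr images import`
      # for the containerd matrix entry, and `podman load` plus a retag to the docker.io names
      # for the CRI-O entry, so the cluster uses the freshly built images rather than pulling
      # from a registry.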
      - name: Run KubeArmor
        run: |
          if [[ ${{ matrix.runtime }} == "containerd" ]]; then
            docker save kubearmor/kubearmor-test-init:latest | sudo k3s ctr images import -
            docker save kubearmor/kubearmor-test:latest | sudo k3s ctr images import -
            docker save kubearmor/kubearmor-operator:latest | sudo k3s ctr images import -
            docker save kubearmor/kubearmor-snitch:latest | sudo k3s ctr images import -
            if [[ ${{ steps.filter.outputs.controller }} == 'true' ]]; then
              docker save kubearmor/kubearmor-controller:latest | sudo k3s ctr images import -
            fi
          else
            if [ ${{ matrix.runtime }} == "crio" ]; then
              docker save kubearmor/kubearmor-test-init:latest | sudo podman load
              sudo podman tag localhost/latest:latest docker.io/kubearmor/kubearmor-test-init:latest
              docker save kubearmor/kubearmor-test:latest | sudo podman load
              sudo podman tag localhost/latest:latest docker.io/kubearmor/kubearmor-test:latest
              docker save kubearmor/kubearmor-operator:latest | sudo podman load
              sudo podman tag localhost/latest:latest docker.io/kubearmor/kubearmor-operator:latest
              docker save kubearmor/kubearmor-snitch:latest | sudo podman load
              sudo podman tag localhost/latest:latest docker.io/kubearmor/kubearmor-snitch:latest
              if [ ${{ steps.filter.outputs.controller }} == 'true' ]; then
                docker save kubearmor/kubearmor-controller:latest | sudo podman load
                sudo podman tag localhost/latest:latest docker.io/kubearmor/kubearmor-controller:latest
              fi
            fi
          fi
          docker system prune -a -f
          docker buildx prune -a -f
          helm upgrade --install kubearmor-operator ./deployments/helm/KubeArmorOperator -n kubearmor --create-namespace --set kubearmorOperator.image.tag=latest
          kubectl wait --for=condition=ready --timeout=5m -n kubearmor pod -l kubearmor-app=kubearmor-operator
          kubectl get pods -A
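          # If the controller was rebuilt in this run, patch the sample KubeArmorConfig
          # client-side with jq so the controller image uses imagePullPolicy "Never" and the
          # locally imported image is used instead of a registry pull.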
          if [[ ${{ steps.filter.outputs.controller }} == 'true' ]]; then
            kubectl apply -f pkg/KubeArmorOperator/config/samples/kubearmor-coverage.yaml --dry-run=client -o json | \
              jq '.spec.kubearmorControllerImage.imagePullPolicy = "Never"' | \
              kubectl apply -f -
          else
            kubectl apply -f pkg/KubeArmorOperator/config/samples/kubearmor-coverage.yaml
          fi
          kubectl wait -n kubearmor --timeout=5m --for=jsonpath='{.status.phase}'=Running kubearmorconfigs/kubearmorconfig-test
          kubectl wait --timeout=7m --for=condition=ready pod -l kubearmor-app,kubearmor-app!=kubearmor-snitch,kubearmor-app!=kubearmor-controller -n kubearmor
          kubectl wait --timeout=1m --for=condition=ready pod -l kubearmor-app=kubearmor-controller -n kubearmor
          kubectl get pods -A
          sleep 10
          DAEMONSET_NAME=$(kubectl get daemonset -n kubearmor -o jsonpath='{.items[0].metadata.name}')
          echo "DaemonSet: $DAEMONSET_NAME"
          kubectl patch daemonset $DAEMONSET_NAME -n kubearmor --type='json' -p='[
            {
              "op": "add",
              "path": "/spec/template/spec/volumes/-",
              "value": {
                "name": "coverage-storage",
                "hostPath": {
                  "path": "/coverage",
                  "type": "DirectoryOrCreate"
                }
              }
            },
            {
              "op": "add",
              "path": "/spec/template/spec/containers/0/volumeMounts/-",
              "value": {
                "mountPath": "/coverage",
                "name": "coverage-storage"
              }
            },
            {
              "op": "add",
              "path": "/spec/template/spec/containers/0/args/-",
              "value": "-test.coverprofile=/coverage/coverage_k8s_${{ matrix.os }}_${{ matrix.runtime }}.out"
            }
          ]'
          sleep 15
      - name: Get KubeArmor POD info
        run: |
          DAEMONSET_NAME=$(kubectl get daemonset -n kubearmor -o jsonpath='{.items[0].metadata.name}')
          LABEL_SELECTOR=$(kubectl get daemonset $DAEMONSET_NAME -n kubearmor -o jsonpath='{.spec.selector.matchLabels}' | jq -r 'to_entries[] | "\(.key)=\(.value)"' | paste -sd, -)
          POD_NAME=$(kubectl get pods -n kubearmor -l "$LABEL_SELECTOR" -o jsonpath='{.items[*].metadata.name}')
          echo "Pod: $POD_NAME"
          echo "POD_NAME=$POD_NAME" >> $GITHUB_ENV
      - name: Test KubeArmor using Ginkgo
        run: |
          go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo
          make
        working-directory: ./tests/k8s_env
        timeout-minutes: 30
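      # SIGINT lets the instrumented kubearmor-test binary shut down cleanly and flush its
      # coverage profile to the mounted /coverage hostPath.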
      - name: Kill KubeArmor process in the pod
        run: |
          KUBEARMOR_PID=$(kubectl exec ${{ env.POD_NAME }} -n kubearmor -c kubearmor -- sh -c "ps aux | grep '[K]ubeArmor/kubearmor-test' | awk '{print \$1}'")
          kubectl exec ${{ env.POD_NAME }} -n kubearmor -c kubearmor -- sh -c "kill -s SIGINT $KUBEARMOR_PID"
          sleep 10
        env:
          POD_NAME: ${{ env.POD_NAME }}
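      # Poll for up to two minutes (24 x 5s) for the coverage profile to appear on the host
      # before copying it into the KubeArmor working directory.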
      - name: Extract coverage file
        run: |
          for i in {1..24}; do
            if [ -f /coverage/coverage_k8s_${{ matrix.os }}_${{ matrix.runtime }}.out ]; then
              cp /coverage/coverage_k8s_${{ matrix.os }}_${{ matrix.runtime }}.out coverage_k8s_${{ matrix.os }}_${{ matrix.runtime }}.out
              break
            fi
            sleep 5
          done
          ls -l
        working-directory: KubeArmor
      - name: Get karmor sysdump
        if: ${{ failure() }}
        run: |
          kubectl describe pod -n kubearmor -l kubearmor-app=kubearmor
          curl -sfL http://get.kubearmor.io/ | sudo sh -s -- -b /usr/local/bin
          mkdir -p /tmp/kubearmor/ && cd /tmp/kubearmor && karmor sysdump
      - name: Archive log artifacts
        if: ${{ failure() }}
        uses: actions/upload-artifact@v3
        with:
          name: kubearmor.logs
          path: |
            /tmp/kubearmor/
            /tmp/kubearmor.*
      - name: Measure code coverage
        if: ${{ always() }}
        run: |
          ls -l
          go tool cover -func coverage_k8s_${{ matrix.os }}_${{ matrix.runtime }}.out
        working-directory: KubeArmor
        env:
          GOPATH: /home/runner/go
      - name: Upload coverage file
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: coverage-k8s-${{ matrix.os }}-${{ matrix.runtime }}
          path: KubeArmor/coverage_k8s_${{ matrix.os }}_${{ matrix.runtime }}.out