diff --git a/Dockerfile.init b/Dockerfile.init index 290f81788f..edcad02e48 100644 --- a/Dockerfile.init +++ b/Dockerfile.init @@ -2,6 +2,29 @@ # Copyright 2021 Authors of KubeArmor ### Make compiler image + +FROM golang:1.22-alpine3.20 AS builder +RUN apk --no-cache update +RUN apk add --no-cache git clang llvm make gcc protobuf +RUN apk add --no-cache linux-headers pkgconfig +RUN apk add --no-cache gpgme-dev +RUN apk add --no-cache btrfs-progs-dev +ARG GOARCH +ARG GOOS + +WORKDIR /KubeArmor + +COPY . . +WORKDIR /KubeArmor/KubeArmor + +RUN go mod download + +WORKDIR /KubeArmor/KubeArmor/deployHook +RUN CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} GO111MODULE=on go build -o deployHook . + +WORKDIR /KubeArmor/KubeArmor/hook +RUN CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} GO111MODULE=on go build -tags 'containers_image_openpgp' -o hook . + FROM redhat/ubi9-minimal as kubearmor-init ARG VERSION=latest @@ -34,7 +57,15 @@ RUN groupadd --gid 1000 default \ COPY LICENSE /licenses/license.txt COPY ./KubeArmor/BPF /KubeArmor/BPF/ COPY ./KubeArmor/build/compile.sh /KubeArmor/compile.sh +COPY --from=builder /KubeArmor/KubeArmor/hook/hook /hook +COPY --from=builder /KubeArmor/KubeArmor/deployHook/deployHook /KubeArmor/deployHook + +# Copy the custom entrypoint script +COPY ./KubeArmor/build/entrypoint.sh /KubeArmor/entrypoint.sh +RUN chmod +x /KubeArmor/entrypoint.sh + RUN chown -R default:default /KubeArmor USER 1000 -ENTRYPOINT ["/KubeArmor/compile.sh"] + +ENTRYPOINT ["/KubeArmor/entrypoint.sh"] diff --git a/KubeArmor/build/entrypoint.sh b/KubeArmor/build/entrypoint.sh new file mode 100644 index 0000000000..73bbb8b82d --- /dev/null +++ b/KubeArmor/build/entrypoint.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -e + +/KubeArmor/compile.sh + +/KubeArmor/deployHook diff --git a/KubeArmor/common/common.go b/KubeArmor/common/common.go index 284a549838..3af93c2d22 100644 --- a/KubeArmor/common/common.go +++ b/KubeArmor/common/common.go @@ -413,7 +413,7 @@ func IsK8sEnv() bool { } // ContainerRuntimeSocketKeys contains FIFO ordered keys of container runtimes -var ContainerRuntimeSocketKeys = []string{"docker", "containerd", "cri-o"} +var ContainerRuntimeSocketKeys = []string{"docker", "containerd", "cri-o","podman"} // ContainerRuntimeSocketMap Structure var ContainerRuntimeSocketMap = map[string][]string{ @@ -432,6 +432,9 @@ var ContainerRuntimeSocketMap = map[string][]string{ "/var/run/crio/crio.sock", "/run/crio/crio.sock", }, + "podman":{ + "/run/podman/podman.sock", + }, } // GetCRISocket Function diff --git a/KubeArmor/core/dockerHandler.go b/KubeArmor/core/dockerHandler.go index 87980df627..c93afe75ae 100644 --- a/KubeArmor/core/dockerHandler.go +++ b/KubeArmor/core/dockerHandler.go @@ -15,6 +15,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" "github.com/kubearmor/KubeArmor/KubeArmor/common" @@ -266,7 +267,7 @@ func (dm *KubeArmorDaemon) GetAlreadyDeployedDockerContainers() { } } - if containerList, err := Docker.DockerClient.ContainerList(context.Background(), types.ContainerListOptions{}); err == nil { + if containerList, err := Docker.DockerClient.ContainerList(context.Background(), container.ListOptions{}); err == nil { for _, dcontainer := range containerList { // get container information from docker client container, err := Docker.GetContainerInfo(dcontainer.ID, dm.OwnerInfo) diff --git a/KubeArmor/core/hook_handler.go b/KubeArmor/core/hook_handler.go new file mode 100644 
index 0000000000..4d9c81ce35
--- /dev/null
+++ b/KubeArmor/core/hook_handler.go
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright 2022 Authors of KubeArmor
+
+package core
+
+import (
+	"encoding/json"
+	"errors"
+	"io"
+	"log"
+	"net"
+	"os"
+	"path/filepath"
+	"sync/atomic"
+
+	kl "github.com/kubearmor/KubeArmor/KubeArmor/common"
+	cfg "github.com/kubearmor/KubeArmor/KubeArmor/config"
+	"github.com/kubearmor/KubeArmor/KubeArmor/types"
+)
+
+const kubearmorDir = "/var/run/kubearmor"
+
+// ContainerEngineHandler defines the interface that a container engine must implement if it supports OCI hooks
+type ContainerEngineHandler interface {
+	HandleCreateContainer(container types.Container)
+	HandleDeleteContainer(containerID string)
+}
+
+// PodmanHandler handles container events coming from Podman
+type PodmanHandler struct {
+	daemon *KubeArmorDaemon
+}
+
+// NewPodmanHandler returns a ContainerEngineHandler for Podman
+func NewPodmanHandler(dm *KubeArmorDaemon) *PodmanHandler {
+	return &PodmanHandler{daemon: dm}
+}
+
+// HandleCreateContainer Function
+func (p *PodmanHandler) HandleCreateContainer(container types.Container) {
+	p.daemon.UpdatePodmanContainer(container.ContainerID, container, "create")
+}
+
+// HandleDeleteContainer Function
+func (p *PodmanHandler) HandleDeleteContainer(containerID string) {
+	p.daemon.UpdatePodmanContainer(containerID, p.daemon.Containers[containerID], "destroy")
+}
+
+// CRIOHandler handles container events coming from CRI-O
+type CRIOHandler struct {
+	daemon *KubeArmorDaemon
+}
+
+// NewCRIOHandler returns a ContainerEngineHandler for CRI-O
+func NewCRIOHandler(dm *KubeArmorDaemon) *CRIOHandler {
+	return &CRIOHandler{daemon: dm}
+}
+
+// HandleCreateContainer Function
+func (c *CRIOHandler) HandleCreateContainer(container types.Container) {
+	c.daemon.handleContainerCreate(container)
+}
+
+// HandleDeleteContainer Function
+func (c *CRIOHandler) HandleDeleteContainer(containerID string) {
+	c.daemon.handleContainerDelete(containerID)
+}
+
+// ListenToHook starts listening on a UNIX socket and waits for container hooks
+// to pass new containers
+func (dm *KubeArmorDaemon) ListenToHook() {
+	if err := os.MkdirAll(kubearmorDir, 0750); err != nil {
+		log.Fatal(err)
+	}
+
+	listenPath := filepath.Join(kubearmorDir, "ka.sock")
+	err := os.Remove(listenPath) // remove any stale socket left behind if KubeArmor crashed earlier
+	if err != nil && !errors.Is(err, os.ErrNotExist) {
+		log.Fatal(err)
+	}
+
+	socket, err := net.Listen("unix", listenPath)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	defer socket.Close()
+	defer os.Remove(listenPath)
+	ready := &atomic.Bool{}
+
+	for {
+		conn, err := socket.Accept()
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		go dm.handleConn(conn, ready)
+	}
+}
+
+// handleConn gets container details from container hooks.
+func (dm *KubeArmorDaemon) handleConn(conn net.Conn, ready *atomic.Bool) {
+	// We must not accept new containers until every container created before KubeArmor
+	// started has been sent first. Otherwise a hook could report that a container was
+	// deleted before the process syncing the pre-existing containers reports it as
+	// created, leaving KubeArmor in an incorrect state.
+	defer conn.Close()
+	buf := make([]byte, 4096)
+
+	for {
+		n, err := conn.Read(buf)
+		if err == io.EOF {
+			return
+		}
+		if err != nil {
+			log.Println(err)
+			return
+		}
+
+		data := types.HookRequest{}
+
+		err = json.Unmarshal(buf[:n], &data)
+		if err != nil {
+			log.Println(err)
+			return
+		}
+
+		if data.Detached {
+			// we want KubeArmor to start accepting new containers only after
+			// all pre-existing containers have been synced
+			defer ready.Store(true)
+		} else if !ready.Load() {
+			_, err = conn.Write([]byte("err"))
+			if err == io.EOF {
+				return
+			} else if err != nil {
+				log.Println(err)
+				return
+			}
+			continue
+		}
+		_, err = conn.Write([]byte("ok"))
+		if err == io.EOF {
+			return
+		} else if err != nil {
+			log.Println(err)
+			return
+		}
+
+		containerLabels, _ := kl.GetLabelsFromString(data.Container.Labels)
+		// Determine which engine sent the hook (only Podman and CRI-O support OCI hooks for now)
+		var handler ContainerEngineHandler
+		if containerLabels["containerType"] == "podman" {
+			handler = NewPodmanHandler(dm)
+		} else {
+			handler = NewCRIOHandler(dm)
+		}
+
+		// Handle the container create or delete event
+		if data.Operation == types.HookContainerCreate {
+			handler.HandleCreateContainer(data.Container)
+		} else {
+			handler.HandleDeleteContainer(data.Container.ContainerID)
+		}
+	}
+}
+
+func (dm *KubeArmorDaemon) handleContainerCreate(container types.Container) {
+	endpoint := types.EndPoint{}
+
+	dm.Logger.Printf("added %s", container.ContainerID)
+
+	dm.ContainersLock.Lock()
+	defer dm.ContainersLock.Unlock()
+	if _, ok := dm.Containers[container.ContainerID]; !ok {
+		dm.Containers[container.ContainerID] = container
+	} else if dm.Containers[container.ContainerID].PidNS == 0 && dm.Containers[container.ContainerID].MntNS == 0 {
+		c := dm.Containers[container.ContainerID]
+		c.MntNS = container.MntNS
+		c.PidNS = container.PidNS
+		c.AppArmorProfile = container.AppArmorProfile
+		dm.Containers[c.ContainerID] = c
+
+		dm.EndPointsLock.Lock()
+		for idx, endPoint := range dm.EndPoints {
+			if endPoint.NamespaceName == container.NamespaceName && endPoint.EndPointName == container.EndPointName && kl.ContainsElement(endPoint.Containers, container.ContainerID) {
+
+				// update apparmor profiles
+				if !kl.ContainsElement(endPoint.AppArmorProfiles, container.AppArmorProfile) {
+					dm.EndPoints[idx].AppArmorProfiles = append(dm.EndPoints[idx].AppArmorProfiles, container.AppArmorProfile)
+				}
+
+				if container.Privileged && dm.EndPoints[idx].PrivilegedContainers != nil {
+					dm.EndPoints[idx].PrivilegedContainers[container.ContainerName] = struct{}{}
+				}
+
+				endpoint = dm.EndPoints[idx]
+
+				break
+			}
+		}
+		dm.EndPointsLock.Unlock()
+	}
+
+	if len(dm.OwnerInfo) > 0 {
+		container.Owner = dm.OwnerInfo[container.EndPointName]
+	}
+
+	if dm.SystemMonitor != nil && cfg.GlobalCfg.Policy {
+		dm.SystemMonitor.AddContainerIDToNsMap(container.ContainerID, container.NamespaceName, container.PidNS, container.MntNS)
+		dm.RuntimeEnforcer.RegisterContainer(container.ContainerID, container.PidNS, container.MntNS)
+
+		if len(endpoint.SecurityPolicies) > 0 { // struct can be empty or no policies registered for the endpoint yet
+			dm.Logger.UpdateSecurityPolicies("ADDED", endpoint)
+			if dm.RuntimeEnforcer != nil && endpoint.PolicyEnabled == types.KubeArmorPolicyEnabled {
+				// enforce security policies
+				dm.RuntimeEnforcer.UpdateSecurityPolicies(endpoint)
+			}
+		}
+	}
+}
+
+func (dm *KubeArmorDaemon) handleContainerDelete(containerID string) {
+	dm.ContainersLock.Lock()
+	container, ok := dm.Containers[containerID]
+	dm.Logger.Printf("deleted %s", containerID)
+	if !ok {
+
dm.ContainersLock.Unlock() + return + } + delete(dm.Containers, containerID) + dm.ContainersLock.Unlock() + + dm.EndPointsLock.Lock() + for idx, endPoint := range dm.EndPoints { + if endPoint.NamespaceName == container.NamespaceName && endPoint.EndPointName == container.EndPointName && kl.ContainsElement(endPoint.Containers, container.ContainerID) { + + // update apparmor profiles + for idxA, profile := range endPoint.AppArmorProfiles { + if profile == container.AppArmorProfile { + dm.EndPoints[idx].AppArmorProfiles = append(dm.EndPoints[idx].AppArmorProfiles[:idxA], dm.EndPoints[idx].AppArmorProfiles[idxA+1:]...) + break + } + } + + break + } + } + dm.EndPointsLock.Unlock() + + if dm.SystemMonitor != nil && cfg.GlobalCfg.Policy { + // update NsMap + dm.SystemMonitor.DeleteContainerIDFromNsMap(containerID, container.NamespaceName, container.PidNS, container.MntNS) + dm.RuntimeEnforcer.UnregisterContainer(containerID) + } + +} + + + diff --git a/KubeArmor/core/kubeArmor.go b/KubeArmor/core/kubeArmor.go index 9951150ba4..f891515ced 100644 --- a/KubeArmor/core/kubeArmor.go +++ b/KubeArmor/core/kubeArmor.go @@ -591,6 +591,8 @@ func KubeArmor() { } else if strings.Contains(cfg.GlobalCfg.CRISocket, "cri-o") { // monitor crio events go dm.MonitorCrioEvents() + } else if strings.Contains(cfg.GlobalCfg.CRISocket, "podman") { + go dm.ListenToHook() } else { dm.Logger.Warnf("Failed to monitor containers: %s is not a supported CRI socket.", cfg.GlobalCfg.CRISocket) enableContainerPolicy = false diff --git a/KubeArmor/core/podmanHandler.go b/KubeArmor/core/podmanHandler.go new file mode 100644 index 0000000000..f21fa5e11b --- /dev/null +++ b/KubeArmor/core/podmanHandler.go @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 Authors of KubeArmor + +// Package core is responsible for initiating and maintaining interactions between external entities like K8s,CRIs and internal KubeArmor entities like eBPF Monitor and Log Feeders +package core + +import ( + "github.com/kubearmor/KubeArmor/KubeArmor/common" + kl "github.com/kubearmor/KubeArmor/KubeArmor/common" + cfg "github.com/kubearmor/KubeArmor/KubeArmor/config" + "github.com/kubearmor/KubeArmor/KubeArmor/state" + tp "github.com/kubearmor/KubeArmor/KubeArmor/types" +) + +// UpdatePodmanContainer Function +func (dm *KubeArmorDaemon) UpdatePodmanContainer(containerID string, container tp.Container, action string) bool { + + if action == "create" { + + if container.ContainerID == "" { + return false + } + + endPoint := tp.EndPoint{} + + dm.ContainersLock.Lock() + if _, ok := dm.Containers[container.ContainerID]; !ok { + dm.Containers[container.ContainerID] = container + dm.ContainersLock.Unlock() + + containerLabels, containerIdentities := common.GetLabelsFromString(container.Labels) + dm.EndPointsLock.Lock() + + endPoint.EndPointName = container.ContainerName + endPoint.ContainerName = container.ContainerName + endPoint.NamespaceName = container.NamespaceName + endPoint.Containers = []string{container.ContainerID} + endPoint.Labels = containerLabels + endPoint.Identities = containerIdentities + endPoint.PolicyEnabled = tp.KubeArmorPolicyEnabled + endPoint.ProcessVisibilityEnabled = true + endPoint.FileVisibilityEnabled = true + endPoint.NetworkVisibilityEnabled = true + endPoint.CapabilitiesVisibilityEnabled = true + + endPoint.AppArmorProfiles = []string{"kubearmor_" + container.ContainerName} + + globalDefaultPosture := tp.DefaultPosture{ + FileAction: cfg.GlobalCfg.DefaultFilePosture, + NetworkAction: 
cfg.GlobalCfg.DefaultNetworkPosture, + CapabilitiesAction: cfg.GlobalCfg.DefaultCapabilitiesPosture, + } + endPoint.DefaultPosture = globalDefaultPosture + + dm.SecurityPoliciesLock.RLock() + for _, secPol := range dm.SecurityPolicies { + if kl.MatchIdentities(secPol.Spec.Selector.Identities, endPoint.Identities) { + endPoint.SecurityPolicies = append(endPoint.SecurityPolicies, secPol) + } + } + dm.SecurityPoliciesLock.RUnlock() + + dm.EndPoints = append(dm.EndPoints, endPoint) + dm.EndPointsLock.Unlock() + + } else { + dm.ContainersLock.Unlock() + return false + } + + if dm.SystemMonitor != nil && cfg.GlobalCfg.Policy { + // for throttling + dm.SystemMonitor.Logger.ContainerNsKey[containerID] = common.OuterKey{ + MntNs: container.MntNS, + PidNs: container.PidNS, + } + + // update NsMap + dm.SystemMonitor.AddContainerIDToNsMap(containerID, container.NamespaceName, container.PidNS, container.MntNS) + dm.RuntimeEnforcer.RegisterContainer(containerID, container.PidNS, container.MntNS) + + + if len(endPoint.SecurityPolicies) > 0 { // struct can be empty or no policies registered for the endPoint yet + dm.Logger.UpdateSecurityPolicies("ADDED", endPoint) + if dm.RuntimeEnforcer != nil && endPoint.PolicyEnabled == tp.KubeArmorPolicyEnabled { + dm.Logger.Printf("Enforcing security policies for container ID %s",containerID) + // enforce security policies + dm.RuntimeEnforcer.UpdateSecurityPolicies(endPoint) + } + } + } + + if cfg.GlobalCfg.StateAgent { + container.Status = "running" + go dm.StateAgent.PushContainerEvent(container, state.EventAdded) + } + + dm.Logger.Printf("Detected a container (added/%.12s/pidns=%d/mntns=%d)", containerID, container.PidNS, container.MntNS) + + } else if action == "destroy" { + dm.ContainersLock.Lock() + container, ok := dm.Containers[containerID] + if !ok { + dm.ContainersLock.Unlock() + return false + } + dm.EndPointsLock.Lock() + dm.MatchandRemoveContainerFromEndpoint(containerID) + dm.EndPointsLock.Unlock() + delete(dm.Containers, containerID) + dm.ContainersLock.Unlock() + + dm.EndPointsLock.Lock() + // remove apparmor profile for that endpoint + for idx, endPoint := range dm.EndPoints { + if endPoint.NamespaceName == container.NamespaceName && endPoint.EndPointName == container.EndPointName && kl.ContainsElement(endPoint.Containers, container.ContainerID) { + + // update apparmor profiles + for idxA, profile := range endPoint.AppArmorProfiles { + if profile == container.AppArmorProfile { + dm.EndPoints[idx].AppArmorProfiles = append(dm.EndPoints[idx].AppArmorProfiles[:idxA], dm.EndPoints[idx].AppArmorProfiles[idxA+1:]...) + break + } + } + + break + } + } + dm.EndPointsLock.Unlock() + // delete endpoint if no security rules and containers + idx := 0 + endpointsLength := len(dm.EndPoints) + for idx < endpointsLength { + endpoint := dm.EndPoints[idx] + if container.NamespaceName == endpoint.NamespaceName && container.ContainerName == endpoint.EndPointName && + len(endpoint.SecurityPolicies) == 0 && len(endpoint.Containers) == 0 { + dm.EndPoints = append(dm.EndPoints[:idx], dm.EndPoints[idx+1:]...) 
+ endpointsLength-- + idx-- + } + idx++ + } + + if dm.SystemMonitor != nil && cfg.GlobalCfg.Policy { + outkey := dm.SystemMonitor.Logger.ContainerNsKey[containerID] + dm.Logger.DeleteAlertMapKey(outkey) + delete(dm.SystemMonitor.Logger.ContainerNsKey, containerID) + // update NsMap + dm.SystemMonitor.DeleteContainerIDFromNsMap(containerID, container.NamespaceName, container.PidNS, container.MntNS) + dm.RuntimeEnforcer.UnregisterContainer(containerID) + } + + if cfg.GlobalCfg.StateAgent { + container.Status = "terminated" + go dm.StateAgent.PushContainerEvent(container, state.EventDeleted) + } + + dm.Logger.Printf("Detected a container (removed/%.12s/pidns=%d/mntns=%d)", containerID, container.PidNS, container.MntNS) + } + + return true +} diff --git a/KubeArmor/deployHook/main.go b/KubeArmor/deployHook/main.go new file mode 100644 index 0000000000..b4d60ad717 --- /dev/null +++ b/KubeArmor/deployHook/main.go @@ -0,0 +1,84 @@ +package main + +import ( + "encoding/json" + "io" + "log" + "os" + "path/filepath" + hooks "github.com/containers/common/pkg/hooks/1.0.0" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func applyPodmanHook() error { + hookDir := "/etc/containers/oci/hooks.d/" + if err := os.MkdirAll(hookDir, 0750); err != nil { + return err + } + + dst, err := os.OpenFile(filepath.Join(hookDir, "ka.json"), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer dst.Close() + + always := true + hook := hooks.Hook{ + Version: "1.0.0", + Hook: specs.Hook{ + Path: "/usr/share/kubearmor/hook", + Args: []string{ + "/usr/share/kubearmor/hook", + "--runtime-socket", + "/run/podman/podman.sock", + + }, + }, + When: hooks.When{Always: &always}, + Stages: []string{ + "poststart", + "poststop", + }, + } + + hookBytes, err := json.Marshal(hook) + if err != nil { + return err + } + + _, err = dst.Write(hookBytes) + if err != nil { + return err + } + + kaDir := "/usr/share/kubearmor" + if err := os.MkdirAll(kaDir, 0750); err != nil { + return err + } + + dstBin, err := os.OpenFile(filepath.Join(kaDir, "hook"), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0755) + if err != nil { + return err + } + defer dstBin.Close() + + srcBin, err := os.Open("/hook") + if err != nil { + return err + } + defer srcBin.Close() + + if _, err := io.Copy(dstBin, srcBin); err != nil { + return err + } + + return nil +} +func main(){ + err := applyPodmanHook() + if err != nil { + log.Printf("Podman hook injection failed: %v", err) + } else { + log.Printf("Podman OCI hook injected successfully") + } +} diff --git a/KubeArmor/hook/main.go b/KubeArmor/hook/main.go new file mode 100644 index 0000000000..871ab3c18d --- /dev/null +++ b/KubeArmor/hook/main.go @@ -0,0 +1,418 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2022 Authors of KubeArmor + +package main + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "bufio" + "log" + "net" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/kubearmor/KubeArmor/KubeArmor/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +const ( + LOGPATH = "/var/log/ka-hook.log" + rootfulContainersPath = "/var/lib/containers/storage/overlay-containers" + containersFileName = "containers.json" + volatileContainersFileName = "volatile-containers.json" +) + +var ( + kubeArmorSocket string + runtimeSocket string + detached bool +) + +type ContainerMetadata struct { + ID string `json:"id"` + Names []string `json:"names"` + Image string `json:"image"` + Metadata string `json:"metadata"` +} + +type 
MetadataDetails struct {
+	ImageName string `json:"image-name"`
+	Name      string `json:"name"`
+}
+
+func main() {
+	flag.StringVar(&kubeArmorSocket, "kubearmor-socket", "/var/run/kubearmor/ka.sock", "KubeArmor socket")
+	flag.StringVar(&runtimeSocket, "runtime-socket", "", "container runtime socket")
+	flag.BoolVar(&detached, "detached", false, "run detached")
+	flag.Parse()
+
+	if runtimeSocket == "" {
+		log.Println("runtime socket must be set")
+		os.Exit(1)
+	}
+	if !strings.HasPrefix(runtimeSocket, "unix://") {
+		runtimeSocket = "unix://" + runtimeSocket
+	}
+	if detached {
+		if err := runDetached(); err != nil {
+			log.Println(err)
+			os.Exit(1)
+		}
+		os.Exit(0)
+	}
+	input, err := io.ReadAll(os.Stdin)
+	if err != nil {
+		log.Println(err)
+		os.Exit(1)
+	}
+	state := specs.State{}
+	err = json.Unmarshal(input, &state)
+	if err != nil {
+		log.Println(err)
+		os.Exit(1)
+	}
+	// Convert state to JSON for logging
+	stateJSON, err := json.Marshal(state)
+	if err != nil {
+		log.Printf("failed to marshal state: %v", err)
+		os.Exit(1)
+	}
+	// Write the container state to the log file
+	logFile, err := os.OpenFile(LOGPATH, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
+	if err != nil {
+		log.Println(err)
+		os.Exit(1)
+	}
+	defer logFile.Close()
+	_, err = logFile.WriteString("Container State is:" + string(stateJSON) + "\n")
+	if err != nil {
+		log.Printf("failed to write to log file: %v", err)
+		os.Exit(1)
+	}
+
+	if err := run(state); err != nil {
+		log.Println(err)
+		os.Exit(1)
+	}
+}
+
+func runDetached() error {
+	// we need to make sure the process exits at some point
+	time.AfterFunc(1*time.Minute, func() {
+		log.Println("failed to get containers, process timed out")
+		os.Exit(1)
+	})
+	conn := waitOnKubeArmor()
+	defer conn.Close()
+
+	handler, err := newPodmanHandler(runtimeSocket)
+	if err != nil {
+		return err
+	}
+	containers, err := handler.listContainers()
+	if err != nil {
+		return err
+	}
+
+	for _, container := range containers {
+		data := types.HookRequest{
+			Operation: types.HookContainerCreate,
+			Detached:  true,
+			Container: container,
+		}
+
+		dataJSON, err := json.Marshal(data)
+		if err != nil {
+			return err
+		}
+
+		_, err = conn.Write(dataJSON)
+		if err != nil {
+			return err
+		}
+		ack := make([]byte, 1024)
+		_, err = conn.Read(ack)
+		if err == io.EOF {
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func run(state specs.State) error {
+	var container types.Container
+	operation := types.HookContainerCreate
+	// we try to connect to the runtime here to make sure the socket is correct
+	// before spawning a detached process
+	_, err := newPodmanHandler(runtimeSocket)
+	if err != nil {
+		return err
+	}
+
+	container.ContainerID = state.ID
+	if state.Status == specs.StateStopped {
+		operation = types.HookContainerDelete
+		return sendContainer(container, operation)
+	}
+
+	var appArmorProfile string
+	var isKubeArmor bool
+	specBytes, err := os.ReadFile(filepath.Join(state.Bundle, "config.json"))
+	if err != nil {
+		return err
+	}
+	var spec specs.Spec
+	err = json.Unmarshal(specBytes, &spec)
+	if err != nil {
+		return err
+	}
+	if spec.Process == nil {
+		return fmt.Errorf("OCI spec for container %s has no process information", state.ID)
+	}
+	appArmorProfile = spec.Process.ApparmorProfile
+	isKubeArmor = len(spec.Process.Args) > 0 && spec.Process.Args[0] == "/KubeArmor/kubearmor"
+	if isKubeArmor {
+		err = startDetachedProcess()
+		if err != nil {
+			return err
+		}
+		// we still continue to try to send container details after starting the detached process,
+		// so that if this was a false positive (a container merely posing as KubeArmor), we still
+		// monitor it.
+	}
+
+	isRootFullPodman := runtimeSocket == "unix:///run/podman/podman.sock"
+
+	var paths []string
+	if isRootFullPodman {
+		// Rootful Podman metadata paths
+		paths = []string{
+			filepath.Join(rootfulContainersPath, containersFileName),
+			filepath.Join(rootfulContainersPath, volatileContainersFileName),
+		}
+	} else {
+		// Rootless Podman: scan /etc/passwd for the user whose home directory
+		// contains the overlay-containers store
+		passwdFile, err := os.Open("/etc/passwd")
+		if err != nil {
+			return fmt.Errorf("failed to open /etc/passwd: %w", err)
+		}
+		defer passwdFile.Close()
+
+		scanner := bufio.NewScanner(passwdFile)
+		var homeDir string
+		for scanner.Scan() {
+			fields := strings.Split(scanner.Text(), ":")
+			if len(fields) < 6 {
+				continue // skip malformed lines
+			}
+
+			userHomeDir := fields[5]
+			potentialPath := filepath.Join(userHomeDir, ".local/share/containers/storage/overlay-containers/containers.json")
+			if _, err := os.Stat(potentialPath); err == nil {
+				homeDir = userHomeDir
+				break
+			}
+		}
+
+		if homeDir == "" {
+			return fmt.Errorf("no user found with an overlay-containers path")
+		}
+
+		// Rootless Podman metadata paths
+		rootlessContainersPath := filepath.Join(homeDir, ".local/share/containers/storage/overlay-containers")
+		paths = []string{
+			filepath.Join(rootlessContainersPath, containersFileName),
+			filepath.Join(rootlessContainersPath, volatileContainersFileName),
+		}
+	}
+
+	var details MetadataDetails
+	found := false
+	for _, path := range paths {
+		details, err = fetchContainerDetails(state.ID, path)
+		if err == nil {
+			found = true
+			break
+		}
+		log.Printf("failed to fetch container details from %s: %v", path, err)
+	}
+
+	if !found {
+		return fmt.Errorf("container with ID %s not found in any path", state.ID)
+	}
+
+	labels := []string{}
+	for k, v := range state.Annotations {
+		labels = append(labels, k+"="+v)
+	}
+	// add labels for policy matching
+	labels = append(labels, "namespaceName="+"container_namespace")
+	labels = append(labels, "containerType="+"podman")
+	labels = append(labels, "kubearmor.io/container.name="+details.Name)
+
+	nodename, nodeErr := os.Hostname()
+	if nodeErr != nil {
+		nodename = ""
+	}
+
+	status := "stopped"
+	if state.Status == specs.StateRunning {
+		status = "running"
+	}
+	container = types.Container{
+		ContainerID:     state.ID,
+		ContainerName:   details.Name,
+		ContainerImage:  details.ImageName,
+		AppArmorProfile: appArmorProfile,
+		NamespaceName:   "container_namespace",
+		EndPointName:    details.Name,
+		NodeName:        nodename,
+		Status:          status,
+		Labels:          strings.Join(labels, ","),
+	}
+	container.PidNS, container.MntNS = getNS(state.Pid)
+
+	return sendContainer(container, operation)
+}
+
+func fetchContainerDetails(containerID, metadataPath string) (MetadataDetails, error) {
+	data, err := ioutil.ReadFile(metadataPath)
+	if err != nil {
+		return MetadataDetails{}, fmt.Errorf("unable to read metadata file: %w", err)
+	}
+
+	var containers []ContainerMetadata
+	err = json.Unmarshal(data, &containers)
+	if err != nil {
+		return MetadataDetails{}, fmt.Errorf("unable to parse metadata file: %w", err)
+	}
+
+	for _, container := range containers {
+		if container.ID == containerID {
+			var details MetadataDetails
+			err := json.Unmarshal([]byte(container.Metadata), &details)
+			if err != nil {
+				return MetadataDetails{}, fmt.Errorf("unable to parse container metadata: %w", err)
+			}
+			return details, nil
+		}
+	}
+
+	return
MetadataDetails{}, fmt.Errorf("container with ID %s not found", containerID) +} + +func getNS(pid int) (uint32, uint32) { + var pidNS uint32 + var mntNS uint32 + + nsPath := fmt.Sprintf("/proc/%d/ns", pid) + + pidLink, err := os.Readlink(filepath.Join(nsPath, "pid")) + if err == nil { + if _, err := fmt.Sscanf(pidLink, "pid:[%d]\n", &pidNS); err != nil { + log.Println(err) + } + } + + mntLink, err := os.Readlink(filepath.Join(nsPath, "mnt")) + if err == nil { + if _, err := fmt.Sscanf(mntLink, "mnt:[%d]\n", &mntNS); err != nil { + log.Println(err) + } + } + return pidNS, mntNS +} + +func sendContainer(container types.Container, operation types.HookOperation) error { + conn, err := net.Dial("unix", kubeArmorSocket) + if err != nil { + // not returning error here because this can happen in multiple cases + // that we don't want container creation to be blocked on: + // - hook was created before KubeArmor was running so the socket doesn't exist yet + // - KubeArmor crashed so there is nothing listening on socket + return nil + } + + defer conn.Close() + + data := types.HookRequest{ + Operation: operation, + Detached: false, + Container: container, + } + + dataJSON, err := json.Marshal(data) + if err != nil { + return err + } + + + for { + _, err = conn.Write(dataJSON) + if err != nil { + return err + } + ack := make([]byte, 1024) + n, err := conn.Read(ack) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + response := ack[:n] + if bytes.Equal(response, []byte("ok")) { + return nil + } else { + time.Sleep(50 * time.Millisecond) // try again in 50 ms + continue + } + + } +} + +func waitOnKubeArmor() net.Conn { + for { + conn, err := net.Dial("unix", kubeArmorSocket) + if err == nil { + return conn + } + time.Sleep(500 * time.Millisecond) + } +} + +func startDetachedProcess() error { + args := os.Args[1:] + args = append(args, "--detached") + cmd := exec.Command(os.Args[0], args...) 
+	logFile, err := os.OpenFile(LOGPATH, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
+	if err != nil {
+		return err
+	}
+	cmd.Stdout = logFile
+	cmd.Stderr = logFile
+	err = cmd.Start()
+	if err != nil {
+		return err
+	}
+	return cmd.Process.Release()
+}
diff --git a/KubeArmor/hook/podman.go b/KubeArmor/hook/podman.go
new file mode 100644
index 0000000000..3b30427bb8
--- /dev/null
+++ b/KubeArmor/hook/podman.go
@@ -0,0 +1,82 @@
+//go:build linux
+// +build linux
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright 2022 Authors of KubeArmor
+
+package main
+
+import (
+	"context"
+	"strings"
+
+	"github.com/containers/podman/v5/pkg/bindings"
+	"github.com/containers/podman/v5/pkg/bindings/containers"
+	"github.com/containers/podman/v5/pkg/domain/entities"
+	"github.com/kubearmor/KubeArmor/KubeArmor/types"
+)
+
+type podmanHandler struct {
+	conn context.Context
+}
+
+func newPodmanHandler(socket string) (*podmanHandler, error) {
+	conn, err := bindings.NewConnection(context.Background(), socket)
+	if err != nil {
+		return nil, err
+	}
+	return &podmanHandler{conn: conn}, nil
+}
+
+func (h *podmanHandler) listContainers() ([]types.Container, error) {
+	listOptions := &containers.ListOptions{
+		Namespace: func(b bool) *bool { return &b }(true),
+	}
+
+	containerList, err := containers.List(h.conn, listOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	var containersData []types.Container
+	for _, container := range containerList {
+		c := containerFromListContainer(container)
+		containersData = append(containersData, c)
+	}
+	return containersData, nil
+}
+
+func containerFromListContainer(container entities.ListContainer) types.Container {
+	kaContainer := types.Container{}
+
+	kaContainer.ContainerID = container.ID
+	if len(container.Names) > 0 {
+		kaContainer.ContainerName = container.Names[0]
+		kaContainer.EndPointName = container.Names[0]
+	}
+
+	kaContainer.NamespaceName = "container_namespace"
+	// kaContainer.Privileged = container.Labels["privileged"] == "true" // Assuming a 'privileged' label is set
+	labels := []string{}
+	labels = append(labels, "namespaceName="+"container_namespace")
+	labels = append(labels, "containerType="+"podman")
+	labels = append(labels, "kubearmor.io/container.name="+kaContainer.ContainerName)
+
+	for k, v := range container.Labels {
+		labels = append(labels, k+"="+v)
+	}
+	kaContainer.Labels = strings.Join(labels, ",")
+	kaContainer.Status = container.State
+	kaContainer.PidNS, kaContainer.MntNS = getNS(container.Pid)
+
+	return kaContainer
+}
diff --git a/KubeArmor/monitor/systemMonitor.go b/KubeArmor/monitor/systemMonitor.go
index c246c874bb..c8e518dff9 100644
--- a/KubeArmor/monitor/systemMonitor.go
+++ b/KubeArmor/monitor/systemMonitor.go
@@ -115,10 +115,10 @@ type SystemMonitor struct {
 	// logs
 	Logger *fd.Feeder
 
-	// container id -> cotnainer
+	// container id -> container
 	Containers *map[string]tp.Container
 	ContainersLock **sync.RWMutex
-
+	// container id -> host pid
 	ActiveHostPidMap *map[string]tp.PidMap
 	ActivePidMapLock **sync.RWMutex
diff --git a/KubeArmor/types/types.go b/KubeArmor/types/types.go
index 1b6046f18d..0c133dfdcc 100644
--- a/KubeArmor/types/types.go
+++ b/KubeArmor/types/types.go
@@ -654,3 +654,20 @@ type PidNode struct {
 
 // KubeArmorHostPolicyEventCallback Function
 type KubeArmorHostPolicyEventCallback func(K8sKubeArmorHostPolicyEvent) pb.PolicyStatus
+
+// =========== //
+// == Hooks == //
+// =========== //
+
+type HookRequest struct {
+	Operation HookOperation `json:"operation"`
+	Detached  bool          `json:"detached"`
+
Container Container `json:"container"` +} + +type HookOperation int + +const ( + HookContainerCreate HookOperation = iota + HookContainerDelete +) \ No newline at end of file diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 0000000000..59adad51f9 --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,86 @@ +services: + kubearmor-init: + container_name: kubearmor-init + image: docker.io/cheithanya/kubearmor-init:latest + pull_policy: "always" + user: root + labels: + app: kubearmor-init + volumes: + - "/tmp:/opt/kubearmor/BPF:rw,z" + - "/lib/modules:/lib/modules:ro,z" + - "/sys/fs/bpf:/sys/fs/bpf:ro,z" + - "/sys/kernel/security:/sys/kernel/security:ro,z" + - "/sys/kernel/debug:/sys/kernel/debug:ro,z" + - "/usr/src:/usr/src:z" + - "/media/root/etc/os-release:/media/root/etc/os-release:ro,z" + - "/etc/containers/oci/hooks.d/:/etc/containers/oci/hooks.d/:rw,z" + - "/usr/share/kubearmor:/usr/share/kubearmor:rw,z" + restart: on-failure + privileged: true + cap_add: + - SETUID + - SETGID + - SETPCAP + - SYS_ADMIN + - SYS_PTRACE + - MAC_ADMIN + - SYS_RESOURCE + - IPC_LOCK + - DAC_OVERRIDE + - DAC_READ_SEARCH + + kubearmor: + depends_on: + kubearmor-init: + condition: service_completed_successfully + hostname: cheithanya + container_name: kubearmor + image: "docker.io/cheithanya/kubearmor:latest" + pull_policy: "always" + user: root + command: + - "-k8s=false" + - "-enableKubeArmorPolicy" + - "-enableKubeArmorHostPolicy" + - "-visibility=process,network" + - "-hostVisibility=process,network" + - "-criSocket=unix:///run/podman/podman.sock" + - "-defaultFilePosture=audit" + - "-defaultNetworkPosture=audit" + - "-defaultCapabilitiesPosture=audit" + - "-hostDefaultFilePosture=audit" + - "-hostDefaultNetworkPosture=audit" + - "-hostDefaultCapabilitiesPosture=audit" + labels: + app: kubearmor + volumes: + - "/tmp:/opt/kubearmor/BPF" + - "/sys/fs/bpf:/sys/fs/bpf" + - "/sys/kernel/security:/sys/kernel/security" + - "/sys/kernel/debug:/sys/kernel/debug" + - "/etc/apparmor.d:/etc/apparmor.d" + - "/var/run/docker.sock:/var/run/docker.sock" + - "/run/docker:/run/docker" + - "/var/lib/docker:/var/lib/docker" + - "/etc/containers/oci/hooks.d/:/etc/containers/oci/hooks.d/:rw" + - "/usr/share/kubearmor:/usr/share/kubearmor:rw" + - "/var/run/kubearmor:/var/run/kubearmor:rw" + restart: always + ports: + - "32767:32767" + pid: "host" + privileged: true + cap_add: + - SETUID + - SETGID + - SETPCAP + - SYS_ADMIN + - SYS_PTRACE + - MAC_ADMIN + - SYS_RESOURCE + - IPC_LOCK + - DAC_OVERRIDE + - DAC_READ_SEARCH + +
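
The new hook protocol can be exercised without Podman: `ListenToHook` accepts a JSON-encoded `types.HookRequest` on `/var/run/kubearmor/ka.sock` and answers `ok` (or `err` while it is still waiting for pre-existing containers to be synced). Below is a minimal, hypothetical test client sketching that exchange; the file name, package layout, and the sample container values are illustrative only and not part of this change.

```go
// hooktest.go - hypothetical helper, not part of this PR.
// Sends one HookRequest to KubeArmor's hook socket and prints the ack,
// mimicking what KubeArmor/hook does after a container is created.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net"

	"github.com/kubearmor/KubeArmor/KubeArmor/types"
)

func main() {
	conn, err := net.Dial("unix", "/var/run/kubearmor/ka.sock")
	if err != nil {
		log.Fatalf("is KubeArmor running with -criSocket=unix:///run/podman/podman.sock? %v", err)
	}
	defer conn.Close()

	req := types.HookRequest{
		Operation: types.HookContainerCreate,
		Detached:  true, // pretend to be the detached syncer so the daemon marks itself ready
		Container: types.Container{
			ContainerID:   "0123456789abcdef", // sample values for illustration
			ContainerName: "demo",
			NamespaceName: "container_namespace",
			Labels:        "containerType=podman,kubearmor.io/container.name=demo",
		},
	}

	payload, err := json.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := conn.Write(payload); err != nil {
		log.Fatal(err)
	}

	ack := make([]byte, 1024)
	n, err := conn.Read(ack) // the daemon replies "ok" once the request is accepted
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("ack from KubeArmor: %s\n", ack[:n])
}
```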