From 254280faf879c9f1bfae11c435f1ca9f9a34d0c1 Mon Sep 17 00:00:00 2001
From: Archana Shinde
Date: Thu, 25 Jul 2024 15:02:53 -0700
Subject: [PATCH] Add scripts to set up CC with qemu on a single node

Scripts create a VM image with TDX support, install the required
dependencies and start qemu with a confidential cluster running on it.

Signed-off-by: Archana Shinde
---
 deployment/bare-metal/README.md               |  74 +++++++
 deployment/bare-metal/create_k8s_node.sh      |  54 ++++++
 deployment/bare-metal/install_dependencies.sh | 180 ++++++++++++++++++
 deployment/bare-metal/setup_cc.sh             | 170 +++++++++++++++++
 4 files changed, 478 insertions(+)
 create mode 100644 deployment/bare-metal/README.md
 create mode 100755 deployment/bare-metal/create_k8s_node.sh
 create mode 100755 deployment/bare-metal/install_dependencies.sh
 create mode 100755 deployment/bare-metal/setup_cc.sh

diff --git a/deployment/bare-metal/README.md b/deployment/bare-metal/README.md
new file mode 100644
index 0000000..dca048d
--- /dev/null
+++ b/deployment/bare-metal/README.md
@@ -0,0 +1,74 @@
+# Deployment Guide on a TD bare-metal host
+
+This guide describes how to set up an Intel TDX host on Ubuntu 24.04 and a TD VM with
+a single-node Kubernetes cluster running on it.
+Follow these instructions to set up the Intel TDX host, create a TD image, boot the TD and run a
+Kubernetes cluster within the TD.
+
+### Prerequisites
+
+These instructions apply to 4th Generation Intel® Xeon® Scalable Processors with Intel® TDX
+activated and to all 5th Generation Intel® Xeon® Scalable Processors.
+
+### Setup host
+
+We first need to install a generic Ubuntu 24.04 server image, install the necessary packages to turn
+the host OS into an Intel TDX-enabled host OS and enable TDX settings in the BIOS.
+Detailed instructions can be found here: [setup-tdx-host](https://github.com/canonical/tdx?tab=readme-ov-file#setup-tdx-host).
+
+To set up your host, you will essentially need to do this:
+```
+$ curl -O https://raw.githubusercontent.com/canonical/tdx/noble-24.04/setup-tdx-host.sh
+$ chmod +x setup-tdx-host.sh
+$ sudo ./setup-tdx-host.sh
+```
+
+Once the above step is completed, reboot your machine and proceed to change the
+BIOS settings to enable TDX.
+
+Go to `Socket Configuration > Processor Configuration > TME, TME-MT, TDX`.
+
+ * Set `Memory Encryption (TME)` to `Enabled`
+ * Set `Total Memory Encryption Bypass` to `Enabled` (Optional setting for best host OS and regular VM performance.)
+ * Set `Total Memory Encryption Multi-Tenant (TME-MT)` to `Enabled`
+ * Set `TME-MT memory integrity` to `Disabled`
+ * Set `Trust Domain Extension (TDX)` to `Enabled`
+ * Set `TDX Secure Arbitration Mode Loader (SEAM Loader)` to `Enabled`. (NOTE: This allows loading Intel TDX Loader and Intel TDX Module from the ESP or BIOS.)
+ * Set `TME-MT/TDX key split` to a non-zero value
+
+Go to `Socket Configuration > Processor Configuration > Software Guard Extension (SGX)`.
+
+ * Set `SW Guard Extensions (SGX)` to `Enabled`
+
+Save the BIOS settings and boot up. Verify that the host has TDX enabled using the `dmesg` command:
+```
+$ sudo dmesg | grep -i tdx
+[ 1.523617] Kernel command line: BOOT_IMAGE=/boot/vmlinuz-6.8.0-1004-intel root=UUID=f5524554-48b2-4edf-b0aa-3cebac84b167 ro kvm_intel.tdx=1 nohibernate nomodeset
+[ 2.551768] virt/tdx: BIOS enabled: private KeyID range [16, 128)
+[ 2.551773] virt/tdx: Disable ACPI S3. Turn off TDX in the BIOS to use ACPI S3.
+[ 20.408972] virt/tdx: TDX module: attributes 0x0, vendor_id 0x8086, major_version 2, minor_version 0, build_date 20231112, build_num 635
+```
+
+### Setup guest
+
+To set up a guest image that has a TDX kernel and all the binaries required for running
+a k3s/k8s cluster, run the following script:
+
+```
+./setup_cc.sh
+```
+
+### Launch a kubernetes cluster
+
+The above step installs a helper script for starting a single-node Kubernetes cluster into the
+home directory of the `tdx` user in the guest image.
+
+To boot and log in to the TD VM:
+```
+$ curl -LO https://raw.githubusercontent.com/cc-api/cvm-image-rewriter/main/start-virt.sh
+$ chmod +x start-virt.sh
+$ ./start-virt.sh -i output.qcow2
+```
+
+Once you have logged in to the TD VM, run the following script to start a single-node Kubernetes cluster:
+```
+$ /home/tdx/launch_cc.sh
+```
diff --git a/deployment/bare-metal/create_k8s_node.sh b/deployment/bare-metal/create_k8s_node.sh
new file mode 100755
index 0000000..dff61a0
--- /dev/null
+++ b/deployment/bare-metal/create_k8s_node.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+#
+# Copyright (c) 2020 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+#set -o xtrace
+set -o errexit
+set -o nounset
+set -o pipefail
+set -o errtrace
+
+pod_network_cidr=${pod_network_cidr:-"10.244.0.0/16"}
+cni_project=${cni_project:-"calico"}
+
+init_cluster() {
+    if [ -d "$HOME/.kube" ]; then
+        rm -rf "$HOME/.kube"
+    fi
+
+    sudo bash -c 'modprobe br_netfilter'
+    sudo bash -c 'modprobe overlay'
+    sudo bash -c 'swapoff -a'
+
+    # initialize cluster
+    #sudo -E kubeadm init --config=./kubeadm.yaml
+    sudo kubeadm init --pod-network-cidr="${pod_network_cidr}"
+
+    mkdir -p "${HOME}/.kube"
+    sudo cp /etc/kubernetes/admin.conf "$HOME/.kube/config"
+    sudo chown "$(id -u)":"$(id -g)" "$HOME/.kube/config"
+
+    # untaint the control-plane node so pods can be scheduled on this
+    # single-node cluster (the "master" taint no longer exists in v1.30):
+    kubectl taint nodes --all node-role.kubernetes.io/control-plane-
+}
+
+install_cni() {
+    if [[ $cni_project == "calico" ]]; then
+        calico_url="https://projectcalico.docs.tigera.io/manifests/calico.yaml"
+        kubectl apply -f "$calico_url"
+    else
+        flannel_url="https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml"
+        kubectl apply -f "$flannel_url"
+    fi
+}
+
+main() {
+    init_cluster
+    install_cni
+}
+
+main "$@"
diff --git a/deployment/bare-metal/install_dependencies.sh b/deployment/bare-metal/install_dependencies.sh
new file mode 100755
index 0000000..f34f2e9
--- /dev/null
+++ b/deployment/bare-metal/install_dependencies.sh
@@ -0,0 +1,180 @@
+#!/bin/bash
+
+set -e
+
+http_proxy=${http_proxy:-}
+https_proxy=${https_proxy:-}
+no_proxy=${no_proxy:-}
+
+function setup_proxy {
+    # /etc/environment style entries: no spaces around "=" 
+    cat <<-EOF | sudo tee -a "/tmp/environment"
+http_proxy="${http_proxy}"
+https_proxy="${https_proxy}"
+no_proxy="${no_proxy}"
+HTTP_PROXY="${http_proxy}"
+HTTPS_PROXY="${https_proxy}"
+NO_PROXY="${no_proxy}"
+EOF
+
+    #cat <<-EOF | sudo tee -a "/etc/profile.d/myenvvar.sh"
+    cat <<-EOF | sudo tee -a "/tmp/myenvvar.sh"
+export http_proxy="${http_proxy}"
+export https_proxy="${https_proxy}"
+export no_proxy="${no_proxy}"
+EOF
+
+    sudo systemctl set-environment http_proxy="${http_proxy}"
+    sudo systemctl set-environment https_proxy="${https_proxy}"
+    sudo systemctl set-environment no_proxy="${no_proxy}"
+}
+
+function install_docker {
+    # install GPG key
+    install -m 0755 -d /etc/apt/keyrings
+    rm -f /etc/apt/keyrings/docker.gpg
+    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
+    chmod a+r /etc/apt/keyrings/docker.gpg
+
+    # install repo
+    echo \
+        "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
+        $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
+        tee /etc/apt/sources.list.d/docker.list > /dev/null
+    apt-get update > /dev/null
+
+    # install docker
+    apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+    systemctl enable docker
+
+    add_docker_proxy_for_builds
+
+    # Add proxy for docker and containerd. This proxy is used by docker pull.
+    services=("containerd" "docker")
+    add_systemd_service_proxy "${services[@]}"
+}
+
+function add_docker_proxy_for_builds() {
+    mkdir -p /home/tdx/.docker
+    cat <<-EOF | sudo tee "/home/tdx/.docker/config.json"
+{
+    "proxies": {
+        "default": {
+            "httpProxy": "${http_proxy}",
+            "httpsProxy": "${https_proxy}",
+            "noProxy": "${no_proxy}"
+        }
+    }
+}
+EOF
+}
+
+function install_helm {
+    # install repo
+    curl -fsSL https://baltocdn.com/helm/signing.asc | gpg --dearmor | tee /usr/share/keyrings/helm.gpg > /dev/null
+    echo \
+        "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | \
+        tee /etc/apt/sources.list.d/helm-stable-debian.list > /dev/null
+    apt-get update > /dev/null
+
+    # install helm
+    apt-get install -y helm
+}
+
+function install_pip {
+    # install python3-pip
+    apt install -y python3-pip
+}
+
+function install_k3s {
+    curl -sfL https://get.k3s.io | sh -
+
+    # configure proxy
+    local k3s_env_file="/etc/systemd/system/k3s.service.env"
+    cat <<-EOF | sudo tee -a $k3s_env_file
+HTTP_PROXY="${http_proxy}"
+HTTPS_PROXY="${https_proxy}"
+NO_PROXY="${no_proxy}"
+EOF
+}
+
+function install_k8s {
+    sudo -E bash -c 'apt-get -y clean'
+
+    # Install Kubernetes from the pkgs.k8s.io community repository:
+    sudo mkdir -p /etc/apt/keyrings
+    curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
+    echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
+    sudo -E apt update
+    sudo -E apt install -y kubelet kubeadm kubectl
+
+    # Packets traversing the bridge should be sent to iptables for processing
+    echo br_netfilter | sudo -E tee /etc/modules-load.d/k8s.conf
+    sudo -E bash -c 'echo "net.bridge.bridge-nf-call-ip6tables = 1" > /etc/sysctl.d/k8s.conf'
+    sudo -E bash -c 'echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.d/k8s.conf'
+    sudo -E bash -c 'echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.d/k8s.conf'
+    sudo -E sysctl --system
+
+    # disable swap
+    swapoff -a
+    sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
+
+    services=("kubelet")
+    add_systemd_service_proxy "${services[@]}"
+}
+
+function add_systemd_service_proxy() {
+    local components=("$@")
+    # Config proxy; fall back to the lower-case variables when the
+    # upper-case ones are unset
+    local HTTP_PROXY="${HTTP_PROXY:-$http_proxy}"
+    local HTTPS_PROXY="${HTTPS_PROXY:-$https_proxy}"
+    local NO_PROXY="${NO_PROXY:-$no_proxy}"
+
+    if [[ -n $HTTP_PROXY ]] || [[ -n $HTTPS_PROXY ]] || [[ -n $NO_PROXY ]]; then
+        for component in "${components[@]}"; do
+            echo "component: " "${component}"
+            mkdir -p /etc/systemd/system/"${component}.service.d"/
+            tee /etc/systemd/system/"${component}.service.d"/http-proxy.conf <<EOF
+[Service]
+Environment="HTTP_PROXY=${HTTP_PROXY}"
+Environment="HTTPS_PROXY=${HTTPS_PROXY}"
+Environment="NO_PROXY=${NO_PROXY}"
+EOF
+        done
+        systemctl daemon-reload
+    fi
+}
+
+main() {
+    setup_proxy
+    install_docker
+    install_helm
+    install_pip
+    # install both cluster flavors so that either k3s or k8s can be
+    # launched later from setup_cc.sh
+    install_k3s
+    install_k8s
+}
+
+main "$@"
diff --git a/deployment/bare-metal/setup_cc.sh b/deployment/bare-metal/setup_cc.sh
new file mode 100755
--- /dev/null
+++ b/deployment/bare-metal/setup_cc.sh
@@ -0,0 +1,170 @@
+#!/bin/bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+http_proxy=${http_proxy:-}
+https_proxy=${https_proxy:-}
+no_proxy=${no_proxy:-}
+
+# Repositories used to build the TD guest image. The canonical/tdx and
+# cvm-image-rewriter URLs are the ones referenced in the README; the CIMA
+# repository URL is environment-specific, so override it if yours differs.
+tdx_repo_url="https://github.com/canonical/tdx"
+tdx_repo="/tmp/canonical-tdx"
+image_rewriter_url="https://github.com/cc-api/cvm-image-rewriter"
+image_rewriter_repo="/tmp/cvm-image-rewriter"
+cima_repo_url=${cima_repo_url:-}
+cima_repo="/tmp/cima"
+cloud_img="/tmp/ubuntu-24.04-server-cloudimg-amd64.img"
+cloud_img_url="https://cloud-images.ubuntu.com/releases/noble/release/ubuntu-24.04-server-cloudimg-amd64.img"
+
+ok() {
+    echo "OK: $*"
+}
+
+error() {
+    echo "ERROR: $*" >&2
+    exit 1
+}
+
+CURR_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )
+GUEST_IMG="tdx-guest-ubuntu-24.04-intel.qcow2"
+
+cleanup() {
+    # TODO: Kill any VMs
+    return
+}
+
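+# A minimal sketch of what cleanup could grow into (kill_leftover_vms is an
+# illustrative name, not part of the original flow): terminate any leftover
+# qemu process that was launched with the generated guest image, matching on
+# GUEST_IMG in the process command line.
+kill_leftover_vms() {
+    pkill -f "qemu-system-x86_64.*${GUEST_IMG}" || true
+}
+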
+create_tdx_image() {
+    # Install pre-requisites
+    sudo apt -y install git
+    sudo apt --no-install-recommends -y install qemu-utils guestfs-tools virtinst genisoimage libvirt-daemon-system libvirt-daemon
+
+    rm -rf "${tdx_repo}"
+    mkdir "${tdx_repo}"
+    git clone "${tdx_repo_url}" "${tdx_repo}"
+
+    pushd "${tdx_repo}/guest-tools/image"
+    # create tdx-guest-ubuntu-24.04-generic.qcow2
+    sudo -E ./create-td-image.sh
+    popd
+
+    # image is created under "${tdx_repo}/guest-tools/image/tdx-guest-ubuntu-24.04-generic.qcow2"
+    img_name="tdx-guest-ubuntu-24.04-generic.qcow2"
+    mv "${tdx_repo}/guest-tools/image/${img_name}" "/tmp/${GUEST_IMG}"
+}
+
+create_tdx_image_cc_image_writer() {
+    # Generate the image using cvm-image-rewriter (https://github.com/cc-api/cvm-image-rewriter)
+    # TODO: clone and create a plugin to install the k8s dependencies
+
+    rm -rf "${cima_repo}"
+    mkdir "${cima_repo}"
+    git clone "${cima_repo_url}" "${cima_repo}"
+
+    pushd "${cima_repo}/tools/build"
+    sudo -E ./build.sh
+    popd
+
+    rm -rf "${image_rewriter_repo}"
+    mkdir "${image_rewriter_repo}"
+    git clone "${image_rewriter_url}" "${image_rewriter_repo}"
+
+    curl --output "${cloud_img}" "${cloud_img_url}"
+
+    pushd "${image_rewriter_repo}"
+    touch plugins/98-ima-example/NOT_RUN
+    export CVM_TDX_GUEST_REPO="${cima_repo}/tools/build/output"
+    ./run.sh -i "${cloud_img}" -t 15
+    popd
+}
+
+setup_guest_image() {
+    if virt-customize -a "${image_rewriter_repo}/output.qcow2" \
+        --mkdir /tmp/tdx/ \
+        --copy-in "${CURR_DIR}/install_dependencies.sh":/tmp/tdx/ \
+        --copy-in "${CURR_DIR}/create_k8s_node.sh":/home/tdx/ \
+        --run-command "http_proxy=${http_proxy} https_proxy=${https_proxy} no_proxy=${no_proxy} /tmp/tdx/install_dependencies.sh"; then
+        ok "Setup guest image..."
+    else
+        error "Failed to set up guest image"
+    fi
+}
+
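+# Optional sanity check (a sketch; verify_guest_image is an illustrative
+# helper, not part of the original flow): use virt-ls from guestfs-tools to
+# confirm that the copied-in scripts actually landed in the customized image.
+verify_guest_image() {
+    virt-ls -a "${image_rewriter_repo}/output.qcow2" /home/tdx/
+    virt-ls -a "${image_rewriter_repo}/output.qcow2" /tmp/tdx/
+}
+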
+check_tdx_version() {
+    # Check for the TDX version in dmesg.
+    # The version string seems to be reliable only on Ubuntu, not on CentOS.
+    local tdx_line=$(sudo sh -c 'dmesg | grep "TDX module"')
+    if [ -n "$tdx_line" ]; then
+        local major_version=$(echo "$tdx_line" | grep -oP 'major_version \K[0-9]+')
+        local minor_version=$(echo "$tdx_line" | grep -oP 'minor_version \K[0-9]+')
+        local build_date=$(echo "$tdx_line" | grep -oP 'build_date \K[0-9]+')
+        local build_num=$(echo "$tdx_line" | grep -oP 'build_num \K[0-9]+')
+
+        echo "Major Version: $major_version"
+        echo "Minor Version: $minor_version"
+        echo "Build date: $build_date"
+        echo "Build number: $build_num"
+    else
+        echo "Could not determine TDX version"
+    fi
+}
+
+launch_cc_k3s() {
+    # TODO: Start a k3s cluster within the TD VM
+    return
+}
+
+launch_cc_k8s() {
+    # TODO: Start a k8s cluster within the TD VM:
+    # 1) Start the qemu VM
+    # 2) ssh into the VM
+    # 3) Start a single-node k8s cluster with the master marked as a worker
+    return
+}
+
+install_pre_reqs() {
+    sudo -E apt install -y qemu-utils guestfs-tools virtinst genisoimage libvirt-daemon-system libvirt-daemon cloud-init
+    sudo usermod -aG libvirt "$USER"
+    sudo chmod o+r /boot/vmlinuz-*
+
+    cat <<EOF | sudo tee -a "/etc/libvirt/qemu.conf"
+user = "root"
+group = "root"
+dynamic_ownership = 0
+EOF
+
+    sudo systemctl daemon-reload
+    sudo systemctl restart libvirtd
+}
+
+main() {
+    trap cleanup EXIT
+
+    echo "http_proxy: " "${http_proxy}"
+    echo "https_proxy: " "${https_proxy}"
+    echo "no_proxy: " "${no_proxy}"
+
+    launch_with_k3s=${launch_with_k3s:-false}
+
+    # Check the TDX version and warn if it is obsolete
+    check_tdx_version
+
+    # Install dependencies for building the tdx image
+    install_pre_reqs
+
+    # Create a VM image with TDX support
+    create_tdx_image
+
+    # Install dependencies in the guest image, including k3s, k8s, docker and
+    # helm, and configure the proxy for each one
+    setup_guest_image
+
+    if [ "${launch_with_k3s}" = true ]; then
+        launch_cc_k3s
+    else
+        launch_cc_k8s
+    fi
+}
+
+main "$@"
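+
+# A sketch of the manual flow that launch_cc_k8s is meant to automate, based
+# on the steps in the README (the VM address is illustrative):
+#
+#   ./start-virt.sh -i output.qcow2       # 1) boot the TD VM
+#   ssh tdx@<vm-ip>                       # 2) log in as the tdx user
+#   /home/tdx/create_k8s_node.sh          # 3) start the single-node cluster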