From a5f36cb9962de34550e794837626e67978786dbd Mon Sep 17 00:00:00 2001 From: Archana Shinde Date: Thu, 25 Jul 2024 15:02:53 -0700 Subject: [PATCH] Add scripts to setup CC with qemu on a single node Scripts create a VM image with TDX support, install required dependencies and start qemu with confidential cluster running on it. Signed-off-by: Archana Shinde --- deployment/bare-metal/README.md | 77 +++++++ deployment/bare-metal/create_k8s_node.sh | 110 ++++++++++ deployment/bare-metal/install_dependencies.sh | 198 ++++++++++++++++++ deployment/bare-metal/setup_cc.sh | 164 +++++++++++++++ 4 files changed, 549 insertions(+) create mode 100644 deployment/bare-metal/README.md create mode 100755 deployment/bare-metal/create_k8s_node.sh create mode 100755 deployment/bare-metal/install_dependencies.sh create mode 100755 deployment/bare-metal/setup_cc.sh diff --git a/deployment/bare-metal/README.md b/deployment/bare-metal/README.md new file mode 100644 index 0000000..da56b5b --- /dev/null +++ b/deployment/bare-metal/README.md @@ -0,0 +1,77 @@ +# Deployment Guide on TD baremetal host + +This guide introduces how to setup an Intel TDX host on Ubuntu 24.04 and a TD VM with +a single node kubernetes cluster running on it. +Follow these instructions to setup Intel TDX host, create a TD image, boot the TD and run a +kubernetes cluster within the TD. + +### Prerequisite + +Instructions are relevant for 4th Generation Intel® Xeon® Scalable Processors with activated Intel® TDX +and all 5th Generation Intel® Xeon® Scalable Processors. + +### Setup host + +We first need to install a generic Ubuntu 24.04 server image, install necessary packages to turn +the host OS into an Intel TDX-enabled host OS and enable TDX settings in the BIOS. +Detailed instructions to do so can be found here [setup-tdx-host](https://github.com/canonical/tdx?tab=readme-ov-file#setup-tdx-host). 
+
To set up your host, you will essentially need to do this:
```
$ curl -O https://raw.githubusercontent.com/canonical/tdx/noble-24.04/setup-tdx-host.sh
$ chmod +x setup-tdx-host.sh
$ sudo ./setup-tdx-host.sh
```

Once the above step is completed, you will need to reboot your machine and proceed to change the
 BIOS settings to enable TDX.

Go to Socket Configuration > Processor Configuration > TME, TME-MT, TDX.

 * Set `Memory Encryption (TME)` to `Enabled`
 * Set `Total Memory Encryption Bypass` to `Enabled` (Optional setting for best host OS and regular VM performance.)
 * Set `Total Memory Encryption Multi-Tenant (TME-MT)` to `Enabled`
 * Set `TME-MT memory integrity` to `Disabled`
 * Set `Trust Domain Extension (TDX)` to `Enabled`
 * Set `TDX Secure Arbitration Mode Loader (SEAM Loader)` to `Enabled`. (NOTE: This allows loading Intel TDX Loader and Intel TDX Module from the ESP or BIOS.)
 * Set `TME-MT/TDX key split` to a non-zero value

Go to `Socket Configuration > Processor Configuration > Software Guard Extension (SGX)`.

 * Set `SW Guard Extensions (SGX)` to `Enabled`

Save BIOS settings and boot up. Verify that the host has TDX enabled using the dmesg command:
```
$ sudo dmesg | grep -i tdx
[ 1.523617] Kernel command line: BOOT_IMAGE=/boot/vmlinuz-6.8.0-1004-intel root=UUID=f5524554-48b2-4edf-b0aa-3cebac84b167 ro kvm_intel.tdx=1 nohibernate nomodeset
[ 2.551768] virt/tdx: BIOS enabled: private KeyID range [16, 128)
[ 2.551773] virt/tdx: Disable ACPI S3. Turn off TDX in the BIOS to use ACPI S3.
[ 20.408972] virt/tdx: TDX module: attributes 0x0, vendor_id 0x8086, major_version 2, minor_version 0, build_date 20231112, build_num 635
```

### Setup guest

To set up a guest image that has a TDX kernel and all the binaries required for running
a k3s/k8s cluster, run the following script:

```
./setup_cc.sh
```

After running the script, you should see an image with the name `tdx-guest-ubuntu-24.04-intel.qcow2`
generated in the current directory. 
The image is setup with user `tdx` with password `123456`. + +### Launch a kubernetes cluster + +The above step will install a helper script to start a single node kubernetes cluster in the +home directory for the `tdx` user in the guest image. + +To ssh into the TD VM: +``` +$ curl -LO https://raw.githubusercontent.com/cc-api/cvm-image-rewriter/main/start-virt.sh +$ ./start-virt.sh -i tdx-guest-ubuntu-24.04-intel.qcow2 +``` + +Once you have logged in the TD VM, run the following script to start a single node kubernetes cluster: +``` +$ /home/tdx/create_k8s_node.sh +``` diff --git a/deployment/bare-metal/create_k8s_node.sh b/deployment/bare-metal/create_k8s_node.sh new file mode 100755 index 0000000..03bb5b5 --- /dev/null +++ b/deployment/bare-metal/create_k8s_node.sh @@ -0,0 +1,110 @@ +#!/bin/bash +# +# Copyright (c) 2024 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +#set -o xtrace +set -o errexit +set -o nounset +set -o pipefail +set -o errtrace + +http_proxy=${http_proxy:-} +https_proxy=${https_proxy:-} +no_proxy=${no_proxy:-} + +pod_network_cidr=${pod_network_cidr:-"10.244.0.0/16"} +service_cidr=${pod_network_cidr:-"10.96.0.0/12"} +cni_project=${cni_project:-"calico"} +local_ip_address="" + +init_cluster() { + if [ -d "$HOME/.kube" ]; then + rm -rf "$HOME/.kube" + fi + + sudo bash -c 'modprobe br_netfilter' + sudo bash -c 'modprobe overlay' + sudo bash -c 'swapoff -a' + + sudo systemctl stop apparmor + sudo systemctl disable apparmor + + # initialize cluster + #kubeadm init --pod-network-cidr=${pod_network_cidr} --ignore-preflight-errors=all + sudo kubeadm init --ignore-preflight-errors=all --config kubeadm-config.yaml + + mkdir -p "${HOME}/.kube" + cp /etc/kubernetes/admin.conf $HOME/.kube/config + chown $(id -u):$(id -g) $HOME/.kube/config + + # taint master node: + kubectl taint nodes --all node-role.kubernetes.io/master- +} + +install_cni() { + + if [[ $cni_project == "calico" ]]; then + 
calico_url="https://projectcalico.docs.tigera.io/manifests/calico.yaml" + kubectl apply -f $calico_url + else + flannel_url="https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml" + kubectl apply -f $flannel_url + fi +} + +find_local_ip_addr() { + # Find the network interface name starting with "enp" or "eth" + interface=$(ip -o link show | awk -F': ' '/^2: enp|^2: eth/ {print $2}') + if [ ! -z "$interface" ]; then + # Get the IP address of the found interface + local_ip_address=$(ip -o -4 addr show "$interface" | awk '{print $4}' | cut -d/ -f1) + fi +} + +# Set proxy with systemctl. +# This steps is required for kubeadm, even though the proxies are set in the systemd config files. +set_systemctl_proxy() { + # Config proxy + local HTTPS_PROXY="$HTTPS_PROXY" + local https_proxy="$https_proxy" + if [ -z "$HTTPS_PROXY" ]; then + HTTPS_PROXY="$https_proxy" + fi + + local HTTP_PROXY="$HTTP_PROXY" + local http_proxy="$http_proxy" + if [ -z "$HTTP_PROXY" ]; then + HTTP_PROXY="$http_proxy" + fi + + local NO_PROXY="$NO_PROXY" + local no_proxy="$no_proxy" + if [ -z "$NO_PROXY" ]; then + NO_PROXY="$no_proxy" + fi + + find_local_ip_addr + if [ ! 
-z "$local_ip_address" ]; then
        # BUG FIX: referenced the undefined ${ip_address}; the discovered
        # address is stored in ${local_ip_address} by find_local_ip_addr.
        NO_PROXY="$NO_PROXY,${local_ip_address}"
    fi

    NO_PROXY="$NO_PROXY,${pod_network_cidr},${service_cidr}"

    if [[ -n $HTTP_PROXY ]] || [[ -n $HTTPS_PROXY ]] || [[ -n $NO_PROXY ]]; then
        sudo systemctl set-environment HTTP_PROXY="$HTTP_PROXY"
        sudo systemctl set-environment HTTPS_PROXY="$HTTPS_PROXY"
        sudo systemctl set-environment NO_PROXY="$NO_PROXY"
        sudo systemctl restart containerd.service
    fi
}

main() {
    set_systemctl_proxy
    init_cluster
    install_cni
}

main "$@"
diff --git a/deployment/bare-metal/install_dependencies.sh b/deployment/bare-metal/install_dependencies.sh new file mode 100755 index 0000000..43feffc --- /dev/null +++ b/deployment/bare-metal/install_dependencies.sh @@ -0,0 +1,198 @@
#!/bin/bash
#
# Copyright (c) 2024 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Installs docker, helm, pip, k3s and k8s inside the guest image, wiring the
# host-supplied proxy settings into each component.

set -e

http_proxy=${http_proxy:-}
https_proxy=${https_proxy:-}
no_proxy=${no_proxy:-}

# Persist the proxy settings system-wide and export them to systemd.
function setup_proxy {
    cat <<-EOF | sudo tee -a "/etc/environment"
http_proxy="${http_proxy}"
https_proxy="${https_proxy}"
no_proxy="${no_proxy}"
HTTP_PROXY="${http_proxy}"
HTTPS_PROXY="${https_proxy}"
NO_PROXY="${no_proxy}"
EOF


    cat <<-EOF | sudo tee -a "/etc/profile.d/myenvvar.sh"
export http_proxy="${http_proxy}"
export https_proxy="${https_proxy}"
export no_proxy="${no_proxy}"
EOF

    # BUG FIX: these were wrapped as sudo sh -c '...': the single quotes
    # deferred expansion to the inner root shell, where http_proxy/https_proxy/
    # no_proxy are unset, so empty values were exported. Expand in this shell.
    sudo systemctl set-environment http_proxy="${http_proxy}"
    sudo systemctl set-environment https_proxy="${https_proxy}"
    sudo systemctl set-environment no_proxy="${no_proxy}"
}

# Install docker from the upstream apt repository and configure its proxies.
function install_docker {
    # install GPG key
    install -m 0755 -d /etc/apt/keyrings
    rm -f /etc/apt/keyrings/docker.gpg
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
    chmod a+r /etc/apt/keyrings/docker.gpg

    # install repo
    # BUG FIX: the escaped \" around the architecture ended up as literal
    # quotes in docker.list ([arch="amd64" ...]), which apt rejects as a
    # malformed entry; the upstream docs use an unquoted value.
    echo \
        "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
        $(. 
/etc/os-release && echo "$VERSION_CODENAME") stable" | \
        tee /etc/apt/sources.list.d/docker.list > /dev/null
    apt-get update > /dev/null

    # install docker
    apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
    systemctl enable docker

    add_docker_proxy_for_builds

    # Add proxy for docker and containerd. This proxy is used in docker pull

    services=("containerd" "docker")
    add_systemd_service_proxy "${services[@]}"
}

# Write the per-user docker client proxy config used during image builds.
function add_docker_proxy_for_builds() {
    mkdir -p /home/tdx/.docker
    cat <<-EOF | sudo tee "/home/tdx/.docker/config.json"
{
    "proxies": {
        "default": {
            "httpProxy": "${http_proxy}",
            "httpsProxy": "${https_proxy}",
            "noProxy": "${no_proxy}"
        }
    }
}
EOF
}

# Install helm from the upstream apt repository.
function install_helm {
    # install repo
    curl -fsSL https://baltocdn.com/helm/signing.asc | gpg --dearmor | tee /usr/share/keyrings/helm.gpg > /dev/null
    echo \
        "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | \
        tee /etc/apt/sources.list.d/helm-stable-debian.list > /dev/null
    apt-get update > /dev/null

    # install helm
    apt-get install -y helm
}


function install_pip {
    # install python3-pip
    apt install -y python3-pip
}

# Download the k3s installer and pre-seed its systemd proxy environment file.
function install_k3s {
    curl -o run_k3s.sh https://get.k3s.io
    chmod +x ./run_k3s.sh

    #configure proxy
    local k3s_env_file="/etc/systemd/system/k3s.service.env"
    cat <<-EOF | sudo tee -a $k3s_env_file
HTTP_PROXY="${http_proxy}"
HTTPS_PROXY="${https_proxy}"
NO_PROXY="${no_proxy}"
EOF

}

# Install kubeadm/kubelet/kubectl (v1.31) and apply kernel prerequisites.
function install_k8s {
    sudo -E bash -c 'apt-get -y clean'
    # BUG FIX: was "... purge kubeadm kubectl kubelet' | true", which piped
    # the output into true (masking nothing useful) instead of ignoring a
    # failed purge with ||; purge also needs -y to run unattended.
    sudo -E bash -c 'apt-get purge -y kubeadm kubectl kubelet' || true

    sudo apt-get install -y wget apt-transport-https ca-certificates curl gnupg

    wget https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key
    gpg --no-default-keyring --keyring /tmp/k8s_keyring.gpg --import Release.key
    gpg --no-default-keyring --keyring 
/tmp/k8s_keyring.gpg --export > /tmp/k8s.gpg + sudo bash -c 'mv /tmp/k8s.gpg /etc/apt/trusted.gpg.d/' + + echo 'deb https://pkgs.k8s.io/core:/stable:/v1.31/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list + sudo -E apt-get update + sudo -E apt install -y kubelet kubeadm kubectl + + # Packets traversing the bridge should be sent to iptables for processing + echo br_netfilter | sudo -E tee /etc/modules-load.d/k8s.conf + sudo -E bash -c 'echo "net.bridge.bridge-nf-call-ip6tables = 1" > /etc/sysctl.d/k8s.conf' + sudo -E bash -c 'echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.d/k8s.conf' + sudo -E bash -c 'echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.d/k8s.conf' + sudo -E sysctl --system + + # disable swap + swapoff -a + sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab + + services=("kubelet") + add_systemd_service_proxy "${services[@]}" +} + +function add_systemd_service_proxy() { + local components=("$@") + # Config proxy + local HTTPS_PROXY="$HTTPS_PROXY" + local https_proxy="$https_proxy" + if [ -z "$HTTPS_PROXY" ]; then + HTTPS_PROXY="$https_proxy" + fi + + local HTTP_PROXY="$HTTP_PROXY" + local http_proxy="$http_proxy" + if [ -z "$HTTP_PROXY" ]; then + HTTP_PROXY="$http_proxy" + fi + + local NO_PROXY="$NO_PROXY" + local no_proxy="$no_proxy" + if [ -z "$NO_PROXY" ]; then + NO_PROXY="$no_proxy" + fi + + if [[ -n $HTTP_PROXY ]] || [[ -n $HTTPS_PROXY ]] || [[ -n $NO_PROXY ]]; then + for component in "${components[@]}"; do + echo "component: " "${component}" + mkdir -p /etc/systemd/system/"${component}.service.d"/ + tee /etc/systemd/system/"${component}.service.d"/http-proxy.conf < /dev/null && pwd ) +GUEST_IMG="tdx-guest-ubuntu-24.04-intel.qcow2" + +create_tdx_image() { + # Install pre-requisites + sudo apt -y install git + sudo apt --no-install-recommends -y install qemu-utils guestfs-tools virtinst genisoimage libvirt-daemon-system libvirt-daemon + + rm -rf "${tdx_repo}" + mkdir "${tdx_repo}" + git clone "${tdx_repo_url}" "${tdx_repo}" + + 
pushd "${tdx_repo}/guest-tools/image" + # create tdx-guest-ubuntu-24.04-generic.qcow2 + sudo -E ./create-td-image.sh + popd + + # image is created under ${tdx_repo/guest-tools/image/tdx-guest-ubuntu-24.04-generic.qcow2" + img_name="tdx-guest-ubuntu-24.04-generic.qcow2" + mv "${tdx_repo}/guest-tools/image/${img_name}" /tmp/${GUEST_IMG} +} + +create_tdx_image_cc_image_writer() { + # TODO generate image using the cc-image-writer (https://github.com/cc-api/cvm-image-rewriter) + # TODO: clone and create plugin to install k8s dependencies + + sudo bash -c "rm -rf ${cima_repo}" + mkdir "${cima_repo}" + git clone "${cima_repo_url}" "${cima_repo}" + + pushd ${cima_repo}/tools/build + sudo -E ./build.sh + popd + + export GIT_SSL_NO_VERIFY=1 + rm -rf "${image_rewriter_repo}" + mkdir "${image_rewriter_repo}" + git clone "${image_rewriter_url}" "${image_rewriter_repo}" + + curl --output "${cloud_img}" "${cloud_img_url}" + + pushd "${image_rewriter_repo}" + touch plugins/98-ima-example/NOT_RUN + export CVM_TDX_GUEST_REPO="${cima_repo}/tools/build/output" + + # Need to resize the image to install TDX kernel and dependencies. + # Todo: Tweek the image size for optimal use. + GUEST_SIZE=6G ./run.sh -i "${cloud_img}" -t 15 + popd +} + +# This is meant to copy the script over from the cvm-image-writer repo +# to the current directory so that we can use that script to launch the TD VM. +copy_start_virt_script() { + cp "${image_rewriter_repo}/start-virt.sh" . +} + +setup_guest_image() { + sudo virt-customize -a ${image_rewriter_repo}/output.qcow2 \ + --mkdir /tmp/tdx/ \ + --copy-in ${CURR_DIR}/install_dependencies.sh:/tmp/tdx/ \ + --copy-in ${CURR_DIR}/create_k8s_node.sh:/home/tdx/ \ + --copy-in ${CURR_DIR}/kubeadm-config.yaml:/home/tdx/ \ + --run-command "http_proxy=${http_proxy} https_proxy=${https_proxy} no_proxy=${no_proxy} /tmp/tdx/install_dependencies.sh" + if [ $? -eq 0 ]; then + echo "Setup guest image..." 
+ else + echo "Failed to setup guest image" + exit 1 + fi + mv "${image_rewriter_repo}/output.qcow2" "${CURR_DIR}/${GUEST_IMG}" +} + +check_tdx_version() { + # Check for tdx version in demesg? + # version seems to be relaible only on Ubuntu, not on Centos + local tdx_line=$(sudo sh -c 'dmesg | grep "TDX module"') + if [ -n "$tdx_line" ]; then + local major_version=$(echo $tdx_line | grep -oP 'major_version \K[0-9]+') + local minor_version=$(echo $tdx_line | grep -oP 'minor_version \K[0-9]+') + local build_date=$(echo $tdx_line | grep -oP 'build_date \K[0-9]+') + local build_num=$(echo $tdx_line | grep -oP 'build_num \K[0-9]+') + + echo "Major Version: $major_version" + echo "Minor Version: $minor_version" + echo "Build date: $build_date" + echo "Minor Version: $build_num" + else + echo "Could not determine TDX version" + fi +} + +install_pre_reqs() { + sudo -E apt install -y qemu-utils guestfs-tools virtinst genisoimage libvirt-daemon-system libvirt-daemon cloud-init + sudo usermod -aG libvirt $USER + sudo chmod o+r /boot/vmlinuz-* + + # Todo: the following configuration needs to be done just once. Check for user + # config first and then add the following. + cat <<-EOF | sudo tee -a "/etc/libvirt/qemu.conf" +user = "root" +group = "root" +dynamic_ownership = 0 + EOF + + sudo systemctl daemon-reload + sudo systemctl restart libvirtd +} + + +main() { + echo "http_proxy: " "${http_proxy}" + echo "https_proxy: " "${https_proxy}" + echo "no_proxy: " "${no_proxy}" + + launch_with_k3s=${launch_with_k3s:-false} + + # Check tdx version and warn if using obsolete + check_tdx_version + + # Install dependencies for building tdx image + install_pre_reqs + + # Creates a VM image with TDX support + create_tdx_image_cc_image_writer + + # Installs dependencies in the guest image including k3s, k8s, docker, helm + # and configures proxy for each one + setup_guest_image + + copy_start_virt_script +} + +main $@ +