@@ -235,6 +235,121 @@ jobs:
       - name: "Show podman images information"
         run: podman images --digests
 
+      - name: "Check if we have tests or not"
+        id: have-tests
+        run: "ci/cached-builds/has_tests.py --target ${{ inputs.target }}"
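+      # NOTE: has_tests.py itself is not part of this diff; the `if:` checks
+      # below assume it reports a boolean via the step output, e.g.
+      #   echo "tests=true" >> "$GITHUB_OUTPUT"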
+
+      # https://cri-o.io/
+      - name: Install cri-o
+        if: ${{ steps.have-tests.outputs.tests == 'true' }}
+        run: |
+          set -Eeuxo pipefail
+
+          sudo apt-get update
+          sudo apt-get install -y software-properties-common curl
+
+          curl -fsSL https://pkgs.k8s.io/core:/stable:/$KUBERNETES_VERSION/deb/Release.key | \
+            sudo gpg --dearmor --batch --yes -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
+
+          echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/$KUBERNETES_VERSION/deb/ /" | \
+            sudo tee /etc/apt/sources.list.d/kubernetes.list
+
+          curl -fsSL https://pkgs.k8s.io/addons:/cri-o:/stable:/$CRIO_VERSION/deb/Release.key | \
+            sudo gpg --dearmor --batch --yes -o /etc/apt/keyrings/cri-o-apt-keyring.gpg
+
+          echo "deb [signed-by=/etc/apt/keyrings/cri-o-apt-keyring.gpg] https://pkgs.k8s.io/addons:/cri-o:/stable:/$CRIO_VERSION/deb/ /" | \
+            sudo tee /etc/apt/sources.list.d/cri-o.list
+
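+          # (Assumption: /etc/apt/keyrings already exists, as it does on the
+          # ubuntu-22.04 and later GitHub runner images; older images would
+          # first need: sudo mkdir -p -m 755 /etc/apt/keyrings)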
+          sudo apt-get update
+          sudo apt-get install -y cri-o kubelet kubeadm kubectl
+
+          # make use of /etc/cni/net.d/11-crio-ipv4-bridge.conflist so we don't
+          # need a pod network and can just use the default bridge
+          sudo rm -rf /etc/cni/net.d/*
+          # cat /etc/cni/net.d/11-crio-ipv4-bridge.conflist
+          # https://github.com/containerd/containerd/blob/main/script/setup/install-cni
+          # https://www.cni.dev/plugins/current/main/bridge/
+          sudo cp ci/cached-builds/11-crio-ipv4-bridge.conflist /etc/cni/net.d/11-crio-ipv4-bridge.conflist
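+          # A minimal sketch of what that conflist is assumed to contain (the
+          # file itself is not shown in this diff): a plain CNI bridge plugin
+          # with host-local IPAM on cri-o's default pod subnet, roughly
+          #   { "cniVersion": "1.0.0", "name": "crio",
+          #     "plugins": [ { "type": "bridge", "bridge": "cni0",
+          #       "isGateway": true, "ipMasq": true,
+          #       "ipam": { "type": "host-local",
+          #                 "ranges": [ [ { "subnet": "10.85.0.0/16" } ] ] } } ] }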
+
+          sudo cp ci/cached-builds/crio.conf /etc/crio/crio.conf.d/
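+          # (Assumption: the crio.conf drop-in carries runner-specific tweaks,
+          # e.g. a TOML snippet such as
+          #   [crio.runtime]
+          #   cgroup_manager = "systemd"
+          # -- it is referenced here but not part of this diff.)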
+
+          sudo systemctl start crio.service
+        env:
+          CRIO_VERSION: v1.30
+          KUBERNETES_VERSION: v1.30
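+      # (v1.30 names the pkgs.k8s.io minor-release channels, so apt resolves
+      # the newest 1.30.x patch releases of cri-o and kubeadm/kubelet/kubectl.)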
+
+      - name: Show crio debug data (on failure)
+        if: ${{ failure() && steps.have-tests.outputs.tests == 'true' }}
+        run: |
+          set -Eeuxo pipefail
+
+          sudo systemctl status crio.service || true
+          sudo journalctl -xeu crio.service
+
+      # do this early, it's a good check that cri-o is not completely broken
+      - name: "Show crio images information"
+        if: ${{ steps.have-tests.outputs.tests == 'true' }}
+        run: sudo crictl images
+
+      - name: Install Kubernetes cluster
+        if: ${{ steps.have-tests.outputs.tests == 'true' }}
+        run: |
+          set -Eeuxo pipefail
+
+          sudo swapoff -a
+          sudo modprobe br_netfilter
+          sudo sysctl -w net.ipv4.ip_forward=1
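+          # (Swap off, br_netfilter loaded, and IPv4 forwarding enabled are the
+          # standard kubeadm preflight requirements.)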
+
+          # Was getting strange DNS resolution errors from pods that sometimes
+          # didn't seem to want to go away:
+          #  Resolving raw.githubusercontent.com (raw.githubusercontent.com)... failed: Name or service not known.
+          #  wget: unable to resolve host address ‘raw.githubusercontent.com’
+          # Here's what helped:
+          #  https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/#known-issues
+          #  https://github.com/kubernetes/kubernetes/blob/e4c1f980b76fecece30c2f77885a7117192170a6/CHANGELOG/CHANGELOG-1.30.md?plain=1#L1454
+          #  https://github.com/canonical/microk8s/issues/68#issuecomment-404923563
+          sudo ufw allow in on cni0
+          sudo ufw allow out on cni0
+          sudo ufw default allow routed
+          sudo iptables -P FORWARD ACCEPT
+          sudo iptables -t nat -A POSTROUTING -s 10.85.0.0/16 -o eth0 -j MASQUERADE
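+          # (10.85.0.0/16 is cri-o's default bridge pod CIDR; eth0 is assumed
+          # to be the runner VM's primary interface.)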
+
+          # https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm
+          sudo kubeadm init --config=ci/cached-builds/kubeadm.yaml
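+          # Sketch of what kubeadm.yaml presumably pins down (the file is not
+          # part of this diff): at minimum the cri-o socket, e.g.
+          #   apiVersion: kubeadm.k8s.io/v1beta3
+          #   kind: InitConfiguration
+          #   nodeRegistration:
+          #     criSocket: unix:///var/run/crio/crio.sock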
+
+          mkdir -p $HOME/.kube
+          sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+          sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+      - name: Show kubelet debug data (on failure)
+        if: ${{ failure() && steps.have-tests.outputs.tests == 'true' }}
+        run: |
+          set -Eeuxo pipefail
+
+          sudo systemctl status kubelet || true
+          sudo journalctl -xeu kubelet
+
+          # Here is one example of how you may list all running Kubernetes containers by using crictl:
+          sudo crictl --runtime-endpoint unix:///var/run/crio/crio.sock ps -a | grep kube | grep -v pause
+          # Once you have found the failing container, you can inspect its logs with:
+          #  crictl --runtime-endpoint unix:///var/run/crio/crio.sock logs CONTAINERID
+
+      - name: Untaint the master
+        if: ${{ steps.have-tests.outputs.tests == 'true' }}
+        run: kubectl taint nodes --all node-role.kubernetes.io/control-plane-
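+      # (The trailing "-" removes the taint, so workloads can schedule on this
+      # single-node cluster's control-plane node.)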
+
+      - name: Show nodes status and wait for readiness
+        if: ${{ steps.have-tests.outputs.tests == 'true' }}
+        run: |
+          kubectl describe nodes
+          kubectl wait --for=condition=Ready nodes --all --timeout=100s || (kubectl describe nodes && false)
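+      # (The `|| (kubectl describe nodes && false)` idiom re-dumps node state
+      # when the wait times out while still failing the step.)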
+
+      - name: Wait for pods to be running
+        if: ${{ steps.have-tests.outputs.tests == 'true' }}
+        run: |
+          set -Eeuxo pipefail
+          kubectl wait deployments --all --all-namespaces --for=condition=Available --timeout=100s
+          kubectl wait pods --all --all-namespaces --for=condition=Ready --timeout=100s
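+      # (In practice this waits for the kube-system add-ons, notably CoreDNS,
+      # to become Ready before any tests run.)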
+
       - name: Run Trivy vulnerability scanner
         if: ${{ steps.resolve-target.outputs.target }}
         run: |