#!/bin/bash
#
# kind.sh: create a local kind cluster with an NGINX ingress controller, cert-manager,
# Reloader, and a CoreDNS rewrite that resolves *.localho.st to the ingress controller.
#
# Based on
# https://github.com/hyperledger-labs/fabric-operator/blob/cc26bdb71599428a2879b40274376dde37f341a5/sample-network-multi-org/scripts/kind_with_nginx.sh#L1
set -eo pipefail
set -x
KIND_CLUSTER_IMAGE=kindest/node:v1.24.4

# Create a single-node kind cluster, mapping host ports 80/443 into the node so the
# ingress controller is reachable from the host machine.
cat << EOF | kind create cluster --name kind --image $KIND_CLUSTER_IMAGE --config=-
---
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  kubeadmConfigPatches:
  - |
    kind: InitConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        node-labels: "ingress-ready=true"
  extraPortMappings:
  - containerPort: 80
    hostPort: 80
    protocol: TCP
  - containerPort: 443
    hostPort: 443
    protocol: TCP
networking:
  apiServerAddress: 127.0.0.1
  apiServerPort: 8888
EOF
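
# Optional check (a sketch, not part of the original script): confirm the new cluster's
# API server is reachable before installing anything into it. kind names the kubectl
# context "kind-<cluster name>", so "kind-kind" here.
kubectl cluster-info --context kind-kind || true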
# Install Stakater Reloader, which restarts workloads when their ConfigMaps or Secrets change.
kubectl apply -f https://raw.githubusercontent.com/stakater/Reloader/v1.0.12/deployments/kubernetes/reloader.yaml
# Install cert-manager and wait for its deployments to finish rolling out.
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml
sleep 5
kubectl -n cert-manager rollout status deploy/cert-manager
kubectl -n cert-manager rollout status deploy/cert-manager-cainjector
kubectl -n cert-manager rollout status deploy/cert-manager-webhook
# Install the NGINX ingress controller from the local kustomization and wait for the
# controller pod to become Ready.
kubectl apply -k kind/nginx
sleep 10
kubectl wait \
  --namespace ingress-nginx \
  --for=condition=ready pod \
  --selector=app.kubernetes.io/component=controller \
  --timeout=3m
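
# Optional smoke test (a sketch, not part of the original script): with host port 80
# mapped into the kind node, the ingress controller should answer on localhost; a 404
# from nginx is expected while no Ingress resource matches. Assumes nothing else on the
# host is already bound to port 80.
curl -sS -o /dev/null -w 'ingress controller HTTP status: %{http_code}\n' http://localhost/ || true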
# Patch CoreDNS so that any *.localho.st name resolves to the ingress controller's
# cluster IP, letting in-cluster clients use the same hostnames as the host machine.
CLUSTER_IP=$(kubectl -n ingress-nginx get svc ingress-nginx-controller -o json | jq -r .spec.clusterIP)
cat << EOF | kubectl apply -f -
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        rewrite name regex (.*)\.localho\.st host.ingress.internal
        hosts {
            ${CLUSTER_IP} host.ingress.internal
            fallthrough
        }
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
EOF
# Restart CoreDNS so it picks up the updated Corefile immediately.
kubectl -n kube-system rollout restart deployment/coredns
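
# Optional sanity check (a sketch, not part of the original script): wait for the
# restarted CoreDNS pods and confirm that an arbitrary *.localho.st name resolves
# inside the cluster. Assumes the busybox:1.36 image can be pulled; the throwaway
# pod is deleted after the lookup.
kubectl -n kube-system rollout status deployment/coredns
kubectl run dns-smoke-test --rm -i --restart=Never --image=busybox:1.36 \
  -- nslookup test.localho.st || true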