Skip to content
This repository has been archived by the owner on Oct 21, 2020. It is now read-only.

Commit

Permalink
Merge pull request #1022 from johngmyers/nodeselector
Browse files Browse the repository at this point in the history
Add support for nodeSelector constraint for Helm DaemonSet template
  • Loading branch information
k8s-ci-robot authored Oct 11, 2018
2 parents fba71e6 + cebf93e commit e3b7353
Show file tree
Hide file tree
Showing 16 changed files with 230 additions and 50 deletions.
3 changes: 2 additions & 1 deletion local-volume/helm/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -72,8 +72,9 @@ provisioner chart and their default values.
| daemonset.serviceAccount | Provisioner DaemonSet service account. | str | `local-storage-admin` |
| daemonset.kubeConfigEnv | Specify the location of kubernetes config file. | str | `-` |
| daemonset.nodeLabels | List of node labels to be copied to the PVs created by the provisioner. | list | `-` |
| daemonset.nodeSelector | NodeSelector constraint on nodes eligible to run the provisioner. | map | `-` |
| daemonset.tolerations | List of tolerations to be applied to the Provisioner DaemonSet. | list | `-` |
| daemonset.resources | Map of resource request and limits to be applied to the Provisioner Daemonset. | map | `-` |
| daemonset.resources | Map of resource request and limits to be applied to the Provisioner DaemonSet. | map | `-` |
Note: `classes` is a list of objects, you can specify one or more classes.

## Examples
Expand Down
20 changes: 20 additions & 0 deletions local-volume/helm/examples/baremetal-nodeselector.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
classes:
- name: local-storage
hostDir: /mnt/disks
blockCleanerCommand:
# Do a quick reset of the block device during its cleanup.
# - "/scripts/quick_reset.sh"
# or use dd to zero out block dev in two iterations by uncommenting these lines.
# - "/scripts/dd_zero.sh"
# - "2"
# or run shred utility for 2 iterations.
- "/scripts/shred.sh"
- "2"
# or blkdiscard utility by uncommenting the line below.
# - "/scripts/blkdiscard.sh"
volumeMode: Block
storageClass: true

daemonset:
nodeSelector:
localVolume: present
2 changes: 1 addition & 1 deletion local-volume/helm/provisioner/Chart.yaml
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
apiVersion: v1
version: 2.0.0
version: 2.3.0
description: local provisioner chart
name: provisioner
keywords:
Expand Down
4 changes: 4 additions & 0 deletions local-volume/helm/provisioner/templates/provisioner.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,10 @@ spec:
app: local-volume-provisioner
spec:
serviceAccountName: {{.Values.daemonset.serviceAccount}}
{{- if .Values.daemonset.nodeSelector }}
nodeSelector:
{{ .Values.daemonset.nodeSelector | toYaml | trim | indent 8 }}
{{- end }}
{{- if .Values.daemonset.tolerations }}
tolerations:
{{ .Values.daemonset.tolerations | toYaml | trim | indent 8 }}
Expand Down
5 changes: 5 additions & 0 deletions local-volume/helm/provisioner/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,11 @@ daemonset:
# Defines a name of the service account which Provisioner will use to communicate with API server.
#
serviceAccount: local-storage-admin
# If configured, nodeSelector will add a nodeSelector field to the DaemonSet PodSpec.
#
# NodeSelector constraint for local-volume-provisioner scheduling to nodes.
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
#
# If configured KubeConfigEnv will (optionally) specify the location of kubeconfig file on the node.
# kubeConfigEnv: KUBECONFIG
Expand Down
14 changes: 7 additions & 7 deletions local-volume/helm/test/expected/baremetal-cleanbyjobs.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ metadata:
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.0.0
chart: provisioner-2.3.0
data:
useJobForCleaning: "yes"
storageClassMap: |
Expand Down Expand Up @@ -91,7 +91,7 @@ metadata:
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.0.0
chart: provisioner-2.3.0

---
# Source: provisioner/templates/provisioner-cluster-role-binding.yaml
Expand All @@ -104,7 +104,7 @@ metadata:
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.0.0
chart: provisioner-2.3.0
subjects:
- kind: ServiceAccount
name: local-storage-admin
Expand All @@ -122,7 +122,7 @@ metadata:
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.0.0
chart: provisioner-2.3.0
rules:
- apiGroups: [""]
resources: ["nodes"]
Expand All @@ -136,7 +136,7 @@ metadata:
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.0.0
chart: provisioner-2.3.0
subjects:
- kind: ServiceAccount
name: local-storage-admin
Expand All @@ -154,7 +154,7 @@ metadata:
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.0.0
chart: provisioner-2.3.0
rules:
- apiGroups:
- 'batch'
Expand All @@ -171,7 +171,7 @@ metadata:
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.0.0
chart: provisioner-2.3.0
subjects:
- kind: ServiceAccount
name: local-storage-admin
Expand Down
150 changes: 150 additions & 0 deletions local-volume/helm/test/expected/baremetal-nodeselector.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,150 @@
---
# Source: provisioner/templates/provisioner.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: local-provisioner-config
namespace: default
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.3.0
data:
storageClassMap: |
local-storage:
hostDir: /mnt/disks
mountDir: /mnt/disks
blockCleanerCommand:
- "/scripts/shred.sh"
- "2"
volumeMode: Block
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: local-volume-provisioner
namespace: default
labels:
app: local-volume-provisioner
spec:
selector:
matchLabels:
app: local-volume-provisioner
template:
metadata:
labels:
app: local-volume-provisioner
spec:
serviceAccountName: local-storage-admin
nodeSelector:
localVolume: present
containers:
- image: "quay.io/external_storage/local-volume-provisioner:v2.2.0"
name: provisioner
securityContext:
privileged: true
env:
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: MY_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: JOB_CONTAINER_IMAGE
value: "quay.io/external_storage/local-volume-provisioner:v2.2.0"
volumeMounts:
- mountPath: /etc/provisioner/config
name: provisioner-config
readOnly: true
- mountPath: /dev
name: provisioner-dev
- mountPath: /mnt/disks
name: local-storage
mountPropagation: "HostToContainer"
volumes:
- name: provisioner-config
configMap:
name: local-provisioner-config
- name: provisioner-dev
hostPath:
path: /dev
- name: local-storage
hostPath:
path: /mnt/disks
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete

---
# Source: provisioner/templates/provisioner-service-account.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
name: local-storage-admin
namespace: default
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.3.0

---
# Source: provisioner/templates/provisioner-cluster-role-binding.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: local-storage-provisioner-pv-binding
namespace: default
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.3.0
subjects:
- kind: ServiceAccount
name: local-storage-admin
namespace: default
roleRef:
kind: ClusterRole
name: system:persistent-volume-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: local-storage-provisioner-node-clusterrole
namespace: default
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.3.0
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: local-storage-provisioner-node-binding
namespace: default
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.3.0
subjects:
- kind: ServiceAccount
name: local-storage-admin
namespace: default
roleRef:
kind: ClusterRole
name: local-storage-provisioner-node-clusterrole
apiGroup: rbac.authorization.k8s.io

10 changes: 5 additions & 5 deletions local-volume/helm/test/expected/baremetal-resyncperiod.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ metadata:
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.0.0
chart: provisioner-2.3.0
data:
minResyncPeriod: "1h"
storageClassMap: |
Expand Down Expand Up @@ -91,7 +91,7 @@ metadata:
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.0.0
chart: provisioner-2.3.0

---
# Source: provisioner/templates/provisioner-cluster-role-binding.yaml
Expand All @@ -104,7 +104,7 @@ metadata:
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.0.0
chart: provisioner-2.3.0
subjects:
- kind: ServiceAccount
name: local-storage-admin
Expand All @@ -122,7 +122,7 @@ metadata:
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.0.0
chart: provisioner-2.3.0
rules:
- apiGroups: [""]
resources: ["nodes"]
Expand All @@ -136,7 +136,7 @@ metadata:
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.0.0
chart: provisioner-2.3.0
subjects:
- kind: ServiceAccount
name: local-storage-admin
Expand Down
10 changes: 5 additions & 5 deletions local-volume/helm/test/expected/baremetal-tolerations.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ metadata:
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.0.0
chart: provisioner-2.3.0
data:
storageClassMap: |
local-storage:
Expand Down Expand Up @@ -94,7 +94,7 @@ metadata:
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.0.0
chart: provisioner-2.3.0

---
# Source: provisioner/templates/provisioner-cluster-role-binding.yaml
Expand All @@ -107,7 +107,7 @@ metadata:
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.0.0
chart: provisioner-2.3.0
subjects:
- kind: ServiceAccount
name: local-storage-admin
Expand All @@ -125,7 +125,7 @@ metadata:
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.0.0
chart: provisioner-2.3.0
rules:
- apiGroups: [""]
resources: ["nodes"]
Expand All @@ -139,7 +139,7 @@ metadata:
labels:
heritage: "Tiller"
release: "RELEASE-NAME"
chart: provisioner-2.0.0
chart: provisioner-2.3.0
subjects:
- kind: ServiceAccount
name: local-storage-admin
Expand Down
Loading

0 comments on commit e3b7353

Please sign in to comment.