# Node-labeler stack: a DaemonSet that runs on every node and continuously
# ensures each node carries the worker role label plus the Longhorn
# default-disk label/annotation. RBAC grants read/patch on nodes to the
# default ServiceAccount of the node-labeler namespace.
---
apiVersion: v1
kind: Namespace
metadata:
  name: node-labeler
---
# Reconciliation script mounted into the labeler pods. $1 is the node name
# (passed by the DaemonSet as $(NODE_NAME)). Each label/annotation is only
# written when a grep of the node's current metadata shows it missing, so
# existing values are never overwritten.
apiVersion: v1
kind: ConfigMap
metadata:
  name: labeler-script
  namespace: node-labeler
data:
  run.sh: |
    #!/usr/bin/env sh
    while true; do
      kubectl get node $1 -o=jsonpath='{.metadata.labels}' | grep -q node-role.kubernetes.io/worker || kubectl label node $1 node-role.kubernetes.io/worker=
      kubectl get node $1 -o=jsonpath='{.metadata.labels}' | grep -q node.longhorn.io/create-default-disk || kubectl label node $1 node.longhorn.io/create-default-disk='config'
      kubectl get node $1 -o=jsonpath='{.metadata.annotations}' | grep -q node.longhorn.io/default-disks-config || kubectl annotate node $1 node.longhorn.io/default-disks-config='[ { "path":"/storage1", "allowScheduling":true }, { "name":"storage2", "path":"/storage2", "allowScheduling":true, "storageReserved":0 }]'
      sleep 60
    done
---
# One labeler pod per node; each pod reconciles only its own node, which it
# learns via the Downward API (spec.nodeName -> NODE_NAME).
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: worker-node-labeler
  namespace: node-labeler
spec:
  selector:
    matchLabels:
      role: worker-node-labeler
  template:
    metadata:
      labels:
        role: worker-node-labeler
    spec:
      tolerations:
        # Legacy control-plane taint key (clusters < v1.24).
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
        # FIX: v1.24+ clusters taint control-plane nodes with this key instead;
        # without it the DaemonSet never schedules there on modern clusters.
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
      volumes:
        - name: labeler-script
          configMap:
            name: labeler-script
            # 0777 is octal (Kubernetes' YAML 1.1 decoder reads it as 511):
            # the script must be executable inside the container.
            defaultMode: 0777
      containers:
        - name: labeler
          # NOTE(review): untagged image floats to :latest — consider pinning
          # a bitnami/kubectl version matching the cluster.
          image: bitnami/kubectl
          imagePullPolicy: IfNotPresent
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          command: ["/bin/sh", "-c", "/labeler-script/run.sh $(NODE_NAME)"]
          volumeMounts:
            - mountPath: /labeler-script
              name: labeler-script
      restartPolicy: Always
---
# FIX: ClusterRole is cluster-scoped — the original `namespace: node-labeler`
# in metadata was meaningless and has been removed.
# Verbs cover what run.sh needs: get (read labels/annotations) and patch
# (kubectl label/annotate); list allows name-less lookups.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: worker-node-labeler-role
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "patch", "list"]
---
# FIX: ClusterRoleBinding is cluster-scoped — removed the spurious
# metadata.namespace here as well. Binds the role to the default
# ServiceAccount that the DaemonSet pods run as (no serviceAccountName set).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: worker-node-labeler-binding
subjects:
  - kind: ServiceAccount
    name: default
    namespace: node-labeler
roleRef:
  kind: ClusterRole
  name: worker-node-labeler-role
  apiGroup: rbac.authorization.k8s.io