Commit 10fdca71 authored by konfiot

Add elasticsearch + kibana + logstash

parent 522312d8
.git
# OWNERS file for Kubernetes
OWNERS
appVersion: 6.3.1
description: Flexible and powerful open source, distributed real-time search and analytics engine.
home: https://www.elastic.co/products/elasticsearch
icon: https://static-www.elastic.co/assets/blteb1c97719574938d/logo-elastic-elasticsearch-lt.svg
maintainers:
- email: christian@jetstack.io
name: simonswine
- email: michael.haselton@gmail.com
name: icereval
- email: pete.brown@powerhrg.com
name: rendhalver
name: elasticsearch
sources:
- https://www.elastic.co/products/elasticsearch
- https://github.com/jetstack/elasticsearch-pet
- https://github.com/giantswarm/kubernetes-elastic-stack
- https://github.com/GoogleCloudPlatform/elasticsearch-docker
- https://github.com/clockworksoul/helm-elasticsearch
- https://github.com/pires/kubernetes-elasticsearch-cluster
version: 1.4.0
The elasticsearch cluster has been installed.
Elasticsearch can be accessed:
* Within your cluster, at the following DNS name at port 9200:
{{ template "elasticsearch.client.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
* From outside the cluster, run these commands in the same shell:
{{- if contains "NodePort" .Values.client.serviceType }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "elasticsearch.client.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.client.serviceType }}
WARNING: You have likely exposed your Elasticsearch cluster directly to the internet.
Elasticsearch does not implement any security for public-facing clusters by default.
As a minimum level of security, switch to ClusterIP/NodePort and place an Nginx gateway in front of the cluster to lock down access to dangerous HTTP endpoints and verbs.
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get svc -w {{ template "elasticsearch.client.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "elasticsearch.client.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:9200
{{- else if contains "ClusterIP" .Values.client.serviceType }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "elasticsearch.name" . }},component={{ .Values.client.name }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:9200 to use Elasticsearch"
kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME 9200:9200
{{- end }}
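Once an endpoint is reachable, a quick sanity check (a minimal example assuming the default port 9200 and no proxy in front) is:

  curl -s http://127.0.0.1:9200/_cluster/health?pretty

A cluster status of "green" or "yellow" means the cluster is serving requests; "red" means primary shards are unassigned.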
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "elasticsearch.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "elasticsearch.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified client name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "elasticsearch.client.fullname" -}}
{{ template "elasticsearch.fullname" . }}-{{ .Values.client.name }}
{{- end -}}
{{/*
Create a default fully qualified data name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "elasticsearch.data.fullname" -}}
{{ template "elasticsearch.fullname" . }}-{{ .Values.data.name }}
{{- end -}}
{{/*
Create a default fully qualified master name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "elasticsearch.master.fullname" -}}
{{ template "elasticsearch.fullname" . }}-{{ .Values.master.name }}
{{- end -}}
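{{/*
A worked example with hypothetical values (not the chart defaults): with release
name "logs" and client.name "client", "elasticsearch.fullname" renders as
"logs-elasticsearch" and "elasticsearch.client.fullname" as
"logs-elasticsearch-client". If the release name already contains the chart
name (e.g. a release named "elasticsearch"), the chart name is not repeated,
giving "elasticsearch" and "elasticsearch-client".
*/}}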
apiVersion: apps/v1beta1
kind: Deployment
metadata:
labels:
app: {{ template "elasticsearch.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.client.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "elasticsearch.client.fullname" . }}
spec:
replicas: {{ .Values.client.replicas }}
template:
metadata:
labels:
app: {{ template "elasticsearch.name" . }}
component: "{{ .Values.client.name }}"
release: {{ .Release.Name }}
{{- if .Values.client.podAnnotations }}
annotations:
{{ toYaml .Values.client.podAnnotations | indent 8 }}
{{- end }}
spec:
{{- if .Values.client.priorityClassName }}
priorityClassName: "{{ .Values.client.priorityClassName }}"
{{- end }}
securityContext:
fsGroup: 1000
{{- if eq .Values.client.antiAffinity "hard" }}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
app: "{{ template "elasticsearch.name" . }}"
release: "{{ .Release.Name }}"
component: "{{ .Values.client.name }}"
{{- else if eq .Values.client.antiAffinity "soft" }}
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
podAffinityTerm:
topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
app: "{{ template "elasticsearch.name" . }}"
release: "{{ .Release.Name }}"
component: "{{ .Values.client.name }}"
{{- end }}
{{- if .Values.client.nodeSelector }}
nodeSelector:
{{ toYaml .Values.client.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.client.tolerations }}
tolerations:
{{ toYaml .Values.client.tolerations | indent 8 }}
{{- end }}
initContainers:
# see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html
# and https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#mlockall
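# Elasticsearch requires vm.max_map_count >= 262144 for its memory-mapped index
# files; the container runs privileged because sysctl changes a host kernel setting.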
- name: "sysctl"
image: "busybox"
imagePullPolicy: "Always"
command: ["sysctl", "-w", "vm.max_map_count=262144"]
securityContext:
privileged: true
containers:
- name: elasticsearch
env:
- name: NODE_DATA
value: "false"
{{- if hasPrefix "5." .Values.appVersion }}
- name: NODE_INGEST
value: "false"
{{- end }}
- name: NODE_MASTER
value: "false"
- name: DISCOVERY_SERVICE
value: {{ template "elasticsearch.fullname" . }}-discovery.{{ .Release.Namespace }}.svc.{{ .Values.cluster.kubernetesDomain }}
- name: PROCESSORS
valueFrom:
resourceFieldRef:
resource: limits.cpu
- name: ES_JAVA_OPTS
value: "-Djava.net.preferIPv4Stack=true -Xms{{ .Values.client.heapSize }} -Xmx{{ .Values.client.heapSize }}"
{{- range $key, $value := .Values.cluster.env }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
resources:
{{ toYaml .Values.client.resources | indent 12 }}
readinessProbe:
httpGet:
path: /_cluster/health
port: 9200
initialDelaySeconds: 5
livenessProbe:
httpGet:
path: /_cluster/health
port: 9200
initialDelaySeconds: 90
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
ports:
- containerPort: 9200
name: http
- containerPort: 9300
name: transport
volumeMounts:
- mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
name: config
subPath: elasticsearch.yml
{{- if hasPrefix "2." .Values.image.tag }}
- mountPath: /usr/share/elasticsearch/config/logging.yml
name: config
subPath: logging.yml
{{- end }}
{{- if hasPrefix "5." .Values.image.tag }}
- mountPath: /usr/share/elasticsearch/config/log4j2.properties
name: config
subPath: log4j2.properties
{{- end }}
volumes:
- name: config
configMap:
name: {{ template "elasticsearch.fullname" . }}
{{- if .Values.client.podDisruptionBudget.enabled }}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
labels:
app: {{ template "elasticsearch.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.client.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "elasticsearch.client.fullname" . }}
spec:
{{- if .Values.client.podDisruptionBudget.minAvailable }}
minAvailable: {{ .Values.client.podDisruptionBudget.minAvailable }}
{{- end }}
{{- if .Values.client.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ .Values.client.podDisruptionBudget.maxUnavailable }}
{{- end }}
selector:
matchLabels:
app: {{ template "elasticsearch.name" . }}
component: "{{ .Values.client.name }}"
release: {{ .Release.Name }}
{{- end }}
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ template "elasticsearch.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.client.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "elasticsearch.client.fullname" . }}
{{- if .Values.client.serviceAnnotations }}
annotations:
{{ toYaml .Values.client.serviceAnnotations | indent 4 }}
{{- end }}
spec:
ports:
- name: http
port: 9200
targetPort: http
selector:
app: {{ template "elasticsearch.name" . }}
component: "{{ .Values.client.name }}"
release: {{ .Release.Name }}
type: {{ .Values.client.serviceType }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "elasticsearch.fullname" . }}
labels:
app: {{ template "elasticsearch.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
data:
elasticsearch.yml: |-
cluster.name: {{ .Values.cluster.name }}
node.data: ${NODE_DATA:true}
node.master: ${NODE_MASTER:true}
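# ${VAR:default} placeholders are resolved from the container environment at
# startup (falling back to the default), so this single ConfigMap can serve the
# client, data and master roles, each overriding its own NODE_* variables.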
{{- if hasPrefix "5." .Values.appVersion }}
node.ingest: ${NODE_INGEST:true}
{{- else if hasPrefix "6." .Values.appVersion }}
node.ingest: ${NODE_INGEST:true}
{{- end }}
node.name: ${HOSTNAME}
network.host: 0.0.0.0
{{- if hasPrefix "2." .Values.appVersion }}
# see https://github.com/kubernetes/kubernetes/issues/3595
bootstrap.mlockall: ${BOOTSTRAP_MLOCKALL:false}
discovery:
zen:
ping.unicast.hosts: ${DISCOVERY_SERVICE:}
minimum_master_nodes: ${MINIMUM_MASTER_NODES:2}
{{- else if hasPrefix "5." .Values.appVersion }}
# see https://github.com/kubernetes/kubernetes/issues/3595
bootstrap.memory_lock: ${BOOTSTRAP_MEMORY_LOCK:false}
discovery:
zen:
ping.unicast.hosts: ${DISCOVERY_SERVICE:}
minimum_master_nodes: ${MINIMUM_MASTER_NODES:2}
{{- if .Values.cluster.xpackEnable }}
# see https://www.elastic.co/guide/en/x-pack/current/xpack-settings.html
xpack.ml.enabled: ${XPACK_ML_ENABLED:false}
xpack.monitoring.enabled: ${XPACK_MONITORING_ENABLED:false}
xpack.security.enabled: ${XPACK_SECURITY_ENABLED:false}
xpack.watcher.enabled: ${XPACK_WATCHER_ENABLED:false}
{{- end }}
{{- else if hasPrefix "6." .Values.appVersion }}
# see https://github.com/kubernetes/kubernetes/issues/3595
bootstrap.memory_lock: ${BOOTSTRAP_MEMORY_LOCK:false}
discovery:
zen:
ping.unicast.hosts: ${DISCOVERY_SERVICE:}
minimum_master_nodes: ${MINIMUM_MASTER_NODES:2}
{{- if .Values.cluster.xpackEnable }}
# see https://www.elastic.co/guide/en/x-pack/current/xpack-settings.html
xpack.ml.enabled: ${XPACK_ML_ENABLED:false}
xpack.monitoring.enabled: ${XPACK_MONITORING_ENABLED:false}
xpack.security.enabled: ${XPACK_SECURITY_ENABLED:false}
xpack.watcher.enabled: ${XPACK_WATCHER_ENABLED:false}
{{- end }}
{{- end }}
# see https://github.com/elastic/elasticsearch-definitive-guide/pull/679
processors: ${PROCESSORS:}
# avoid split-brain w/ a minimum consensus of two masters plus a data node
gateway.expected_master_nodes: ${EXPECTED_MASTER_NODES:2}
gateway.expected_data_nodes: ${EXPECTED_DATA_NODES:1}
gateway.recover_after_time: ${RECOVER_AFTER_TIME:5m}
gateway.recover_after_master_nodes: ${RECOVER_AFTER_MASTER_NODES:2}
gateway.recover_after_data_nodes: ${RECOVER_AFTER_DATA_NODES:1}
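# E.g. with three master-eligible nodes (master.replicas is typically 3),
# MINIMUM_MASTER_NODES=2 is the (N/2)+1 quorum implied by the note above.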
{{- if .Values.cluster.config }}
{{ toYaml .Values.cluster.config | indent 4 }}
{{- end }}
{{- if hasPrefix "2." .Values.image.tag }}
logging.yml: |-
# you can override this by setting a system property, for example -Des.logger.level=DEBUG
es.logger.level: INFO
rootLogger: ${es.logger.level}, console
logger:
# log action execution errors for easier debugging
action: DEBUG
# reduce the logging for aws, too much is logged under the default INFO
com.amazonaws: WARN
appender:
console:
type: console
layout:
type: consolePattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
{{- else if hasPrefix "5." .Values.image.tag }}
log4j2.properties: |-
status = error
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
rootLogger.level = info
rootLogger.appenderRef.console.ref = console
logger.searchguard.name = com.floragunn
logger.searchguard.level = info
{{- else if hasPrefix "6." .Values.image.tag }}
log4j2.properties: |-
status = error
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
rootLogger.level = info
rootLogger.appenderRef.console.ref = console
logger.searchguard.name = com.floragunn
logger.searchguard.level = info
{{- end }}
pre-stop-hook.sh: |-
#!/bin/bash
NODE_NAME=${HOSTNAME}
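# Drain this node: excluding it by name from shard allocation makes
# Elasticsearch relocate its shards onto the remaining data nodes.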
echo "Prepare to migrate data of the node ${NODE_NAME}"
echo "Move all data from node ${NODE_NAME}"
curl -s -XPUT -H 'Content-Type: application/json' 'localhost:9200/_cluster/settings' -d "{
\"transient\" :{
\"cluster.routing.allocation.exclude._name\" : \"${NODE_NAME}\"
}
}"
echo ""
while true ; do
echo -e "Wait for node ${NODE_NAME} to become empty"
SHARDS_ALLOCATION=$(curl -s -XGET 'http://localhost:9200/_cat/shards')
if ! echo "${SHARDS_ALLOCATION}" | grep -E "${NODE_NAME}"; then
break
fi
sleep 1
done
echo "Node ${NODE_NAME} is ready to shutdown"
post-start-hook.sh: |-
#!/bin/bash
NODE_NAME=${HOSTNAME}
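# If a previous shutdown left this node excluded from allocation, clear the
# exclusion so shards can be placed here again.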
CLUSTER_SETTINGS=$(curl -s -XGET "http://localhost:9200/_cluster/settings")
if echo "${CLUSTER_SETTINGS}" | grep -E "${NODE_NAME}"; then
echo "Activate node ${NODE_NAME}"
curl -s -XPUT -H 'Content-Type: application/json' "http://localhost:9200/_cluster/settings" -d "{
\"transient\" :{
\"cluster.routing.allocation.exclude._name\" : null
}
}"
fi
echo "Node ${NODE_NAME} is ready to be used"
{{- if .Values.data.podDisruptionBudget.enabled }}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
labels:
app: {{ template "elasticsearch.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.data.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "elasticsearch.data.fullname" . }}
spec:
{{- if .Values.data.podDisruptionBudget.minAvailable }}
minAvailable: {{ .Values.data.podDisruptionBudget.minAvailable }}
{{- end }}
{{- if .Values.data.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ .Values.data.podDisruptionBudget.maxUnavailable }}
{{- end }}
selector:
matchLabels:
app: {{ template "elasticsearch.name" . }}
component: "{{ .Values.data.name }}"
release: {{ .Release.Name }}
{{- end }}
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
labels:
app: {{ template "elasticsearch.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.data.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "elasticsearch.data.fullname" . }}
spec:
serviceName: {{ template "elasticsearch.data.fullname" . }}
replicas: {{ .Values.data.replicas }}
template:
metadata:
labels:
app: {{ template "elasticsearch.name" . }}
component: "{{ .Values.data.name }}"
release: {{ .Release.Name }}
{{- if .Values.data.podAnnotations }}
annotations:
{{ toYaml .Values.data.podAnnotations | indent 8 }}
{{- end }}
spec:
{{- if .Values.data.priorityClassName }}
priorityClassName: "{{ .Values.data.priorityClassName }}"
{{- end }}
securityContext:
fsGroup: 1000
{{- if eq .Values.data.antiAffinity "hard" }}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
app: "{{ template "elasticsearch.name" . }}"
release: "{{ .Release.Name }}"
component: "{{ .Values.data.name }}"
{{- else if eq .Values.data.antiAffinity "soft" }}
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
podAffinityTerm:
topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
app: "{{ template "elasticsearch.name" . }}"
release: "{{ .Release.Name }}"
component: "{{ .Values.data.name }}"
{{- end }}
{{- if .Values.data.nodeSelector }}
nodeSelector:
{{ toYaml .Values.data.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.data.tolerations }}
tolerations:
{{ toYaml .Values.data.tolerations | indent 8 }}
{{- end }}
initContainers:
# see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html
# and https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#mlockall
- name: "sysctl"
image: "busybox"
imagePullPolicy: "Always"
command: ["sysctl", "-w", "vm.max_map_count=262144"]
securityContext:
privileged: true
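# The data volume may initially be owned by root; run as root once to hand
# ownership to the elasticsearch user before the main container starts.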
- name: "chown"
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
command:
- /bin/bash
- -c
- chown -R elasticsearch:elasticsearch /usr/share/elasticsearch/data &&
chown -R elasticsearch:elasticsearch /usr/share/elasticsearch/logs
securityContext:
runAsUser: 0
volumeMounts:
- mountPath: /usr/share/elasticsearch/data
name: data
containers:
- name: elasticsearch
env:
- name: DISCOVERY_SERVICE
value: {{ template "elasticsearch.fullname" . }}-discovery.{{ .Release.Namespace }}.svc.{{ .Values.cluster.kubernetesDomain }}
- name: NODE_MASTER
value: "false"
- name: PROCESSORS
valueFrom:
resourceFieldRef:
resource: limits.cpu
- name: ES_JAVA_OPTS
value: "-Djava.net.preferIPv4Stack=true -Xms{{ .Values.data.heapSize }} -Xmx{{ .Values.data.heapSize }}"
{{- range $key, $value := .Values.cluster.env }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
ports:
- containerPort: 9300
name: transport
{{- if .Values.data.exposeHttp }}
- containerPort: 9200
name: http
{{- end }}
resources:
{{ toYaml .Values.data.resources | indent 12 }}
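# ?local=true answers from this node without waiting for an elected master, so
# data pods can pass readiness while the cluster is still forming.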
readinessProbe:
httpGet:
path: /_cluster/health?local=true
port: 9200
initialDelaySeconds: 5
volumeMounts:
- mountPath: /usr/share/elasticsearch/data
name: data
- mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
name: config
subPath: elasticsearch.yml
{{- if hasPrefix "2." .Values.image.tag }}
- mountPath: /usr/share/elasticsearch/config/logging.yml
name: config
subPath: logging.yml
{{- end }}
{{- if hasPrefix "5." .Values.image.tag }}
- mountPath: /usr/share/elasticsearch/config/log4j2.properties
name: config
subPath: log4j2.properties
{{- end }}
- name: config
mountPath: /pre-stop-hook.sh
subPath: pre-stop-hook.sh
- name: config
mountPath: /post-start-hook.sh
subPath: post-start-hook.sh
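# Run the drain/undrain scripts mounted above around pod stop/start; the
# termination grace period below must give shards time to move off.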
lifecycle:
preStop:
exec:
command: ["/bin/bash","/pre-stop-hook.sh"]
postStart:
exec:
command: ["/bin/bash","/post-start-hook.sh"]
terminationGracePeriodSeconds: {{ .Values.data.terminationGracePeriodSeconds }}
volumes:
- name: config
configMap:
name: {{ template "elasticsearch.fullname" . }}
{{- if not .Values.data.persistence.enabled }}
- name: data
emptyDir: {}
{{- else }}
volumeClaimTemplates:
- metadata:
name: {{ .Values.data.persistence.name }}
spec:
accessModes:
- {{ .Values.data.persistence.accessMode | quote }}
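# storageClass semantics follow the usual chart convention: unset uses the
# cluster's default provisioner, "-" sets an empty storageClassName (disabling
# dynamic provisioning), and any other value is used as given.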
{{- if .Values.data.persistence.storageClass }}
{{- if (eq "-" .Values.data.persistence.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.data.persistence.storageClass }}"
{{- end }}
{{- end }}
resources:
requests:
storage: "{{ .Values.data.persistence.size }}"
{{- end }}
{{- if .Values.master.podDisruptionBudget.enabled }}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
labels:
app: {{ template "elasticsearch.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
component: "{{ .Values.master.name }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "elasticsearch.master.fullname" . }}
spec:
{{- if .Values.master.podDisruptionBudget.minAvailable }}
minAvailable: {{ .Values.master.podDisruptionBudget.minAvailable }}
{{- end }}
{{- if .Values.master.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ .Values.master.podDisruptionBudget.maxUnavailable }}
{{- end }}
selector: