Commit 522312d8 authored by konfiot

Add rook namespace

parent 4409a99c
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
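# Illustrative example of the negation rule noted above (not part of the generated file):
# a pattern prefixed with "!" negates a match, e.g. "!important.swp" would keep
# important.swp in the package even though "*.swp" is excluded.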
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
*.tmpl
apiVersion: v1
description: File, Block, and Object Storage Services for your Cloud-Native Environment
icon: https://rook.io/images/logos/rook/rook-logo-color-on-transparent.png
name: rook-ceph
sources:
- https://github.com/rook/rook
version: v0.8.0
See the [Operator Helm Chart](/Documentation/helm-operator.md) documentation.
The Rook Operator has been installed. Check its status by running:
kubectl --namespace {{ .Release.Namespace }} get pods -l "app=rook-ceph-operator"
Visit https://rook.io/docs/rook/master for instructions on how
to create and configure Rook clusters.
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "fullname" -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
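{{/*
Illustrative usage sketch (an assumption, not something the committed templates do):
a chart resource could reference these helpers as, e.g.
  metadata:
    name: {{ template "fullname" . }}
    labels:
      app: {{ template "name" . }}
*/}}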
{{- if .Values.rbacEnable }}
# The cluster role for managing all the cluster-specific resources in a namespace
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: rook-ceph-cluster-mgmt
labels:
operator: rook
storage-backend: ceph
rules:
- apiGroups:
- ""
resources:
- secrets
- pods
- services
- configmaps
verbs:
- get
- list
- watch
- patch
- create
- update
- delete
- apiGroups:
- extensions
resources:
- deployments
- daemonsets
- replicasets
verbs:
- get
- list
- watch
- create
- update
- delete
---
# The cluster role for managing the Rook CRDs
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: rook-ceph-global
labels:
operator: rook
storage-backend: ceph
rules:
- apiGroups:
- ""
resources:
# Pod access is needed for fencing
- pods
# Node access is needed for determining nodes where mons should run
- nodes
- nodes/proxy
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
# PVs and PVCs are managed by the Rook provisioner
- persistentvolumes
- persistentvolumeclaims
verbs:
- get
- list
- watch
- patch
- create
- update
- delete
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- batch
resources:
- jobs
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- ceph.rook.io
resources:
- "*"
verbs:
- "*"
- apiGroups:
- rook.io
resources:
- "*"
verbs:
- "*"
{{- if .Values.pspEnable }}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: rook-ceph-system-psp-user
labels:
operator: rook
storage-backend: ceph
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
rules:
- apiGroups:
- extensions
resources:
- podsecuritypolicies
resourceNames:
- 00-rook-ceph-operator
verbs:
- use
{{- end }}
{{- end }}
{{- if .Values.rbacEnable }}
# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-global
labels:
operator: rook
storage-backend: ceph
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-global
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: {{ .Release.Namespace }}
{{- if .Values.pspEnable }}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: rook-ceph-system-psp-users
labels:
operator: rook
storage-backend: ceph
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-ceph-system-psp-user
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: {{ .Release.Namespace }}
{{- end }}
{{- end }}
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: rook-ceph-operator
labels:
operator: rook
storage-backend: ceph
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
spec:
replicas: 1
selector:
matchLabels:
app: rook-ceph-operator
template:
metadata:
labels:
app: rook-ceph-operator
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
spec:
containers:
- name: rook-ceph-operator
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args: ["ceph", "operator"]
env:
{{- if not .Values.rbacEnable }}
- name: RBAC_ENABLED
value: "false"
{{- end }}
{{- if .Values.agent }}
{{- if .Values.agent.toleration }}
- name: AGENT_TOLERATION
value: {{ .Values.agent.toleration }}
{{- end }}
{{- if .Values.agent.tolerationKey }}
- name: AGENT_TOLERATION_KEY
value: {{ .Values.agent.tolerationKey }}
{{- end }}
{{- if .Values.agent.flexVolumeDirPath }}
- name: FLEXVOLUME_DIR_PATH
value: {{ .Values.agent.flexVolumeDirPath }}
{{- end }}
{{- end }}
{{- if .Values.discover }}
{{- if .Values.discover.toleration }}
- name: DISCOVER_TOLERATION
value: {{ .Values.discover.toleration }}
{{- end }}
{{- if .Values.discover.tolerationKey }}
- name: DISCOVER_TOLERATION_KEY
value: {{ .Values.discover.tolerationKey }}
{{- end }}
{{- end }}
- name: ROOK_LOG_LEVEL
value: {{ .Values.logLevel }}
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- if .Values.mon }}
{{- if .Values.mon.healthCheckInterval }}
- name: ROOK_MON_HEALTHCHECK_INTERVAL
value: {{ .Values.mon.healthCheckInterval }}
{{- end }}
{{- if .Values.mon.monOutTimeout }}
- name: ROOK_MON_OUT_TIMEOUT
value: {{ .Values.mon.monOutTimeout }}
{{- end }}
{{- end }}
resources:
{{ toYaml .Values.resources | indent 10 }}
{{- if .Values.nodeSelector }}
nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.tolerations }}
tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
{{- if .Values.rbacEnable }}
serviceAccountName: rook-ceph-system
{{- end }}
{{- if .Values.pspEnable }}
# PSP for rook-ceph-operator
# Most teams follow the Kubernetes docs and already have these PSPs:
# * privileged (for the kube-system namespace)
# * restricted (for all logged-in users)
#
# If this policy were named `rook-ceph-operator`, it would sort after `restricted` alphabetically,
# and the `restricted` capabilities would be applied to `rook-system`. That is why it is named
# `00-rook-ceph-operator`: it sorts near the top, so `rook-system` gets the intended PSP.
#
# More info on PSP ordering: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#policy-order
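#
# An illustrative way to verify which policy admitted a pod (not part of this chart): the
# PSP admission controller records the selected policy in the pod's kubernetes.io/psp
# annotation, which can be read with, e.g.
#   kubectl get pod <operator-pod> -n <namespace> -o jsonpath='{.metadata.annotations.kubernetes\.io/psp}'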
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
name: 00-rook-ceph-operator
spec:
fsGroup:
rule: RunAsAny
privileged: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- '*'
allowedCapabilities:
- '*'
hostPID: true
hostIPC: true
hostNetwork: true
{{- end }}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusters.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: Cluster
listKind: ClusterList
plural: clusters
singular: cluster
shortNames:
- rcc
scope: Namespaced
version: v1beta1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: filesystems.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: Filesystem
listKind: FilesystemList
plural: filesystems
singular: filesystem
shortNames:
- rcfs
scope: Namespaced
version: v1beta1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: objectstores.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: ObjectStore
listKind: ObjectStoreList
plural: objectstores
singular: objectstore
shortNames:
- rco
scope: Namespaced
version: v1beta1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: pools.ceph.rook.io
spec:
group: ceph.rook.io
names:
kind: Pool
listKind: PoolList
plural: pools
singular: pool
shortNames:
- rcp
scope: Namespaced
version: v1beta1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: volumes.rook.io
spec:
group: rook.io
names:
kind: Volume
listKind: VolumeList
plural: volumes
singular: volume
shortNames:
- rv
scope: Namespaced
version: v1alpha2
---
{{- if .Values.rbacEnable }}
# The role for the operator to manage resources in the system namespace
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: rook-ceph-system
labels:
operator: rook
storage-backend: ceph
rules:
- apiGroups:
- ""
resources:
- pods
- configmaps
verbs:
- get
- list
- watch
- patch
- create
- update
- delete
- apiGroups:
- extensions
resources:
- daemonsets
verbs:
- get
- list
- watch
- create
- update
- delete
{{- end }}
{{- if .Values.rbacEnable }}
# Grant the operator, agent, and discovery agents access to resources in the rook-ceph-system namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: rook-ceph-system
namespace: {{ .Release.Namespace }}
labels:
operator: rook
storage-backend: ceph
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: rook-ceph-system
subjects:
- kind: ServiceAccount
name: rook-ceph-system
namespace: {{ .Release.Namespace }}
{{- end }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-system
labels:
operator: rook
storage-backend: ceph
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
# Default values for rook-ceph-operator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
prefix: rook
repository: rook/ceph
tag: v0.8.0
pullPolicy: IfNotPresent
hyperkube:
repository: k8s.gcr.io/hyperkube
tag: v1.7.12
pullPolicy: IfNotPresent
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
nodeSelector:
# Constrain the rook-ceph-operator Deployment to nodes with the label `disktype: ssd`.
# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# disktype: ssd
# Tolerations for the rook-ceph-operator to allow it to run on nodes with particular taints
tolerations: []
mon:
healthCheckInterval: "45s"
monOutTimeout: "300s"
## LogLevel can be set to: TRACE, DEBUG, INFO, NOTICE, WARNING, ERROR or CRITICAL
logLevel: INFO
## If true, create & use RBAC resources
##
rbacEnable: true
## If true, create & use PSP resources
##
pspEnable: true
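## As an illustrative example (the release name, namespace, and chart path here are
## assumptions), these toggles can also be overridden at install time instead of editing
## this file:
##   helm install --name rook-ceph --namespace rook-ceph-system \
##     --set rbacEnable=true,pspEnable=false ./rook-ceph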
## Rook Agent configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
## flexVolumeDirPath: The path where the Rook agent discovers the flex volume plugins
# agent:
# toleration: NoSchedule
# tolerationKey: key
## For Kubernetes >= 1.9.x flexVolumeDirPath should be changed to /var/lib/kubelet/volumeplugins/
# flexVolumeDirPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/
## Rook Discover configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
# discover:
# toleration: NoSchedule
# tolerationKey: key